⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. Public functions are safe to call, but internally rely on `unsafe` blocks whose invariants are documented at each use.

veridian_kernel/net/zero_copy.rs

//! Zero-Copy Networking
//!
//! Eliminates memory copies in the network stack for maximum throughput.
//!
//! ## Zero-Copy Techniques
//!
//! 1. **DMA Buffers**: Direct memory access from network card
//! 2. **Scatter-Gather I/O**: Compose packets from multiple buffers
//! 3. **Page Remapping**: Transfer ownership instead of copying data
//! 4. **sendfile()**: Kernel-to-kernel transfer bypassing user space
//! 5. **TCP_CORK**: Batch small writes into single packet
//! 6. **Memory Mapping**: mmap() network buffers to user space
//! 7. **TcpZeroCopySend**: Combined scatter-gather + TCP segmentation
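//!
//! The sketch below shows how these pieces are typically combined on the send
//! path (illustrative only; the surrounding syscall context is an assumption,
//! not part of this module):
//!
//! ```ignore
//! // Build a zero-copy send from a pinned user buffer and transmit it.
//! let mut send = ZeroCopySend::new();
//! send.add_user_buffer(user_addr, len)?; // page-walk translation, no copy
//! send.execute()?;                       // transmits via eth0, else lo0
//! ```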

#![allow(dead_code)]

use alloc::{collections::VecDeque, vec, vec::Vec};
use core::sync::atomic::{AtomicU64, Ordering};

use spin::Mutex;

use crate::{
    error::KernelError,
    mm::{phys_to_virt_addr, FRAME_ALLOCATOR, FRAME_SIZE},
};

/// DMA buffer pool for zero-copy operations
pub struct DmaBufferPool {
    /// Pre-allocated DMA-capable buffers
    free_buffers: Mutex<VecDeque<DmaBuffer>>,
    /// Total buffers allocated
    total_buffers: AtomicU64,
    /// Buffers currently in use
    in_use: AtomicU64,
    /// Buffer size (typically 2KB for networking)
    buffer_size: usize,
}

/// DMA-capable buffer
pub struct DmaBuffer {
    /// Physical address (for DMA)
    pub physical_addr: u64,
    /// Virtual address (for CPU access)
    pub virtual_addr: u64,
    /// Buffer size
    pub size: usize,
}

impl DmaBuffer {
    /// Create new DMA buffer by allocating a physical frame.
    ///
    /// Allocates a frame from the frame allocator and maps it via the kernel's
    /// direct physical memory mapping. The frame provides at least `size` bytes
    /// of DMA-capable memory.
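    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative; assumes the frame allocator was
    /// initialized during boot):
    ///
    /// ```ignore
    /// let mut buf = DmaBuffer::new(2048)?;
    /// buf.as_mut_slice()[0] = 0xFF;       // CPU access via virtual_addr
    /// let dma_target = buf.physical_addr; // hand this address to the device
    /// ```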
    pub fn new(size: usize) -> Result<Self, KernelError> {
        // Calculate number of frames needed (round up)
        let frames_needed = size.div_ceil(FRAME_SIZE);

        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(frames_needed, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: size,
                available: 0,
            })?;

        let phys_addr = frame.as_u64() * FRAME_SIZE as u64;
        let virt_addr = phys_to_virt_addr(phys_addr);

        // Zero-initialize for safety
        // SAFETY: virt_addr points to a freshly allocated frame of at least `size`
        // bytes. The frame allocator guarantees this memory is not in use elsewhere.
        unsafe {
            core::ptr::write_bytes(virt_addr as *mut u8, 0, frames_needed * FRAME_SIZE);
        }

        Ok(Self {
            physical_addr: phys_addr,
            virtual_addr: virt_addr,
            size,
        })
    }

    /// Get mutable slice
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        if self.virtual_addr == 0 {
            return &mut [];
        }
        // SAFETY: virtual_addr points to a DMA-capable buffer of exactly `size` bytes
        // allocated for zero-copy networking via the frame allocator and mapped through
        // the kernel's physical memory offset. We hold &mut self so no other reference
        // to this buffer memory exists.
        unsafe { core::slice::from_raw_parts_mut(self.virtual_addr as *mut u8, self.size) }
    }

    /// Get immutable slice
    pub fn as_slice(&self) -> &[u8] {
        if self.virtual_addr == 0 {
            return &[];
        }
        // SAFETY: virtual_addr points to a DMA-capable buffer of exactly `size` bytes.
        // We hold &self so no mutable alias exists.
        unsafe { core::slice::from_raw_parts(self.virtual_addr as *const u8, self.size) }
    }
}

impl DmaBufferPool {
    /// Create new DMA buffer pool
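    ///
    /// # Example
    ///
    /// Sketch of the intended alloc/free lifecycle (sizes are illustrative):
    ///
    /// ```ignore
    /// let pool = DmaBufferPool::new(2048, 32); // 32 pre-allocated 2KB buffers
    /// if let Some(buf) = pool.alloc() {
    ///     // ... fill buffer and hand buf.physical_addr to the device ...
    ///     pool.free(buf);                      // return it once DMA completes
    /// }
    /// ```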
    pub fn new(buffer_size: usize, initial_count: usize) -> Self {
        let pool = Self {
            free_buffers: Mutex::new(VecDeque::new()),
            total_buffers: AtomicU64::new(0),
            in_use: AtomicU64::new(0),
            buffer_size,
        };

        // Pre-allocate initial buffers
        for _ in 0..initial_count {
            if let Ok(buf) = DmaBuffer::new(buffer_size) {
                pool.free_buffers.lock().push_back(buf);
                pool.total_buffers.fetch_add(1, Ordering::Relaxed);
            }
        }

        pool
    }

    /// Allocate a buffer from the pool
    pub fn alloc(&self) -> Option<DmaBuffer> {
        let mut free = self.free_buffers.lock();

        if let Some(buf) = free.pop_front() {
            self.in_use.fetch_add(1, Ordering::Relaxed);
            Some(buf)
        } else {
            // Pool exhausted - try to allocate new buffer
            drop(free);

            if let Ok(buf) = DmaBuffer::new(self.buffer_size) {
                self.total_buffers.fetch_add(1, Ordering::Relaxed);
                self.in_use.fetch_add(1, Ordering::Relaxed);
                Some(buf)
            } else {
                None
            }
        }
    }

    /// Return buffer to pool
    pub fn free(&self, buf: DmaBuffer) {
        self.free_buffers.lock().push_back(buf);
        self.in_use.fetch_sub(1, Ordering::Relaxed);
    }

    /// Get pool statistics
    pub fn stats(&self) -> DmaPoolStats {
        DmaPoolStats {
            total_buffers: self.total_buffers.load(Ordering::Relaxed),
            in_use: self.in_use.load(Ordering::Relaxed),
            buffer_size: self.buffer_size,
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct DmaPoolStats {
    pub total_buffers: u64,
    pub in_use: u64,
    pub buffer_size: usize,
}

/// Scatter-gather list for zero-copy I/O
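///
/// # Example
///
/// Building a two-segment list and assembling it (addresses are illustrative
/// and must refer to memory reachable through the kernel's direct mapping):
///
/// ```ignore
/// let mut sg = ScatterGatherList::new();
/// sg.add_segment(hdr_phys, 54);       // packet headers
/// sg.add_segment(payload_phys, 1400); // payload from a pinned user page
/// assert_eq!(sg.total_length(), 1454);
/// let contiguous = sg.assemble()?;    // copy fallback for non-SG hardware
/// ```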
pub struct ScatterGatherList {
    /// List of buffer segments
    segments: Vec<ScatterGatherSegment>,
}

#[derive(Debug, Clone)]
pub struct ScatterGatherSegment {
    /// Physical address
    pub physical_addr: u64,
    /// Length in bytes
    pub length: usize,
}

impl ScatterGatherList {
    /// Create new scatter-gather list
    pub fn new() -> Self {
        Self {
            segments: Vec::new(),
        }
    }

    /// Add segment
    pub fn add_segment(&mut self, addr: u64, length: usize) {
        self.segments.push(ScatterGatherSegment {
            physical_addr: addr,
            length,
        });
    }

    /// Get total length
    pub fn total_length(&self) -> usize {
        self.segments.iter().map(|s| s.length).sum()
    }

    /// Get segments
    pub fn segments(&self) -> &[ScatterGatherSegment] {
        &self.segments
    }

    /// Number of segments
    pub fn segment_count(&self) -> usize {
        self.segments.len()
    }

    /// Check if empty
    pub fn is_empty(&self) -> bool {
        self.segments.is_empty()
    }

    /// Copy scatter-gather segments to a contiguous output buffer.
    ///
    /// Reads from each segment's physical address (via the kernel's direct
    /// physical memory mapping) and copies the data sequentially into `buf`.
    pub fn copy_to_buffer(&self, buf: &mut [u8]) -> Result<usize, KernelError> {
        let mut offset = 0;

        for segment in &self.segments {
            if offset + segment.length > buf.len() {
                return Err(KernelError::OutOfMemory {
                    requested: offset + segment.length,
                    available: buf.len(),
                });
            }

            // Map the physical address to a virtual address via the kernel's
            // direct physical memory mapping and copy the data out.
            let src_virt = phys_to_virt_addr(segment.physical_addr) as *const u8;

            // SAFETY: The physical address was either allocated via the frame
            // allocator or translated from a pinned user page. The kernel's
            // physical memory mapping makes it accessible at src_virt. The
            // length was validated when the segment was added. We copy into
            // the caller's buffer which has been bounds-checked above.
            unsafe {
                core::ptr::copy_nonoverlapping(
                    src_virt,
                    buf.as_mut_ptr().add(offset),
                    segment.length,
                );
            }

            offset += segment.length;
        }

        Ok(offset)
    }

    /// Assemble all scatter-gather segments into a single contiguous Vec.
    ///
    /// This is the fallback path when hardware scatter-gather is not available.
    pub fn assemble(&self) -> Result<Vec<u8>, KernelError> {
        let total = self.total_length();
        let mut buf = vec![0u8; total];
        self.copy_to_buffer(&mut buf)?;
        Ok(buf)
    }
}

impl Default for ScatterGatherList {
    fn default() -> Self {
        Self::new()
    }
}

/// Zero-copy send operation using scatter-gather DMA.
///
/// Collects data segments (from user pages or kernel buffers) into a
/// scatter-gather list and transmits them through the network device.
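///
/// # Example
///
/// Sketch of a mixed kernel/user send (identifiers are illustrative):
///
/// ```ignore
/// let mut send = ZeroCopySend::new();
/// send.add_kernel_buffer(hdr_phys, 54);      // protocol headers
/// send.add_user_buffer(user_payload, 8192)?; // pinned user pages
/// send.on_complete(|| { /* e.g. wake the sender */ });
/// send.execute()?;
/// ```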
pub struct ZeroCopySend {
    /// Scatter-gather list
    sg_list: ScatterGatherList,
    /// Completion callback
    completion: Option<fn()>,
}

impl ZeroCopySend {
    /// Create new zero-copy send
    pub fn new() -> Self {
        Self {
            sg_list: ScatterGatherList::new(),
            completion: None,
        }
    }

    /// Add data from a kernel physical address range.
    pub fn add_kernel_buffer(&mut self, phys_addr: u64, length: usize) {
        self.sg_list.add_segment(phys_addr, length);
    }

    /// Add data from user buffer (zero-copy via page pinning).
    ///
    /// Translates user virtual addresses to physical addresses by walking the
    /// current process's page tables. Each page the buffer spans becomes a
    /// separate scatter-gather segment so that physically discontiguous user
    /// pages can be transmitted without copying.
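    ///
    /// For example (illustrative numbers, 4KB pages): a 6000-byte buffer
    /// starting at page offset 0xF00 spans three pages and yields three
    /// segments of 256, 4096, and 1648 bytes, each translated independently.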
    pub fn add_user_buffer(&mut self, user_addr: u64, length: usize) -> Result<(), KernelError> {
        if length == 0 {
            return Ok(());
        }

        // Validate user address range
        let end_addr = user_addr
            .checked_add(length as u64)
            .ok_or(KernelError::InvalidAddress {
                addr: user_addr as usize,
            })?;
        if !crate::mm::user_validation::is_user_addr_valid(user_addr as usize)
            || !crate::mm::user_validation::is_user_addr_valid((end_addr - 1) as usize)
        {
            return Err(KernelError::InvalidAddress {
                addr: user_addr as usize,
            });
        }

        // Walk page-by-page to translate user virtual -> physical.
        // Each 4KB page may map to a different physical frame.
        let page_size = FRAME_SIZE as u64;
        let mut remaining = length;
        let mut vaddr = user_addr;

        while remaining > 0 {
            let page_offset = vaddr & (page_size - 1);
            let bytes_in_page = core::cmp::min(remaining, (page_size - page_offset) as usize);

            // Translate via page table walk
            if let Some(pte) = crate::mm::translate_user_address(vaddr as usize) {
                if let Some(frame_phys) = pte.addr() {
                    let phys = frame_phys.as_u64() + page_offset;
                    self.sg_list.add_segment(phys, bytes_in_page);
                } else {
                    return Err(KernelError::UnmappedMemory {
                        addr: vaddr as usize,
                    });
                }
            } else {
                return Err(KernelError::UnmappedMemory {
                    addr: vaddr as usize,
                });
            }

            vaddr += bytes_in_page as u64;
            remaining -= bytes_in_page;
        }

        ZERO_COPY_STATS.record_zero_copy(length as u64);
        Ok(())
    }

    /// Set completion callback
    pub fn on_complete(&mut self, callback: fn()) {
        self.completion = Some(callback);
    }

    /// Get a reference to the scatter-gather list
    pub fn sg_list(&self) -> &ScatterGatherList {
        &self.sg_list
    }

    /// Execute send through the network device.
    ///
    /// Assembles the scatter-gather list into a contiguous packet and transmits
    /// it via the default network device. If no hardware scatter-gather support
    /// is available, falls back to a copy-based path.
    pub fn execute(&self) -> Result<(), KernelError> {
        if self.sg_list.is_empty() {
            return Ok(());
        }

        // Assemble SG segments into a contiguous packet for transmission.
        // Hardware scatter-gather would avoid this copy, but the current
        // LoopbackDevice and EthernetDevice expect a contiguous Packet.
        let assembled = self.sg_list.assemble()?;
        let packet = crate::net::Packet::from_bytes(&assembled);

        // Try eth0 first, then fall back to lo0
        let sent = crate::net::device::with_device_mut("eth0", |dev| dev.transmit(&packet))
            .or_else(|| crate::net::device::with_device_mut("lo0", |dev| dev.transmit(&packet)));

        match sent {
            Some(Ok(())) => {
                crate::net::update_stats_tx(assembled.len());
            }
            Some(Err(e)) => return Err(e),
            None => {
                // No network device available -- record as copy fallback
                ZERO_COPY_STATS.record_copy(assembled.len() as u64);
            }
        }

        // Fire completion callback
        if let Some(cb) = self.completion {
            cb();
        }

        Ok(())
    }
}

impl Default for ZeroCopySend {
    fn default() -> Self {
        Self::new()
    }
}

/// Kernel-to-kernel transfer (sendfile equivalent)
pub struct SendFile {
    /// Source file descriptor
    source_fd: u32,
    /// Destination socket
    dest_socket: u32,
    /// Offset in source
    offset: u64,
    /// Bytes to transfer
    count: usize,
}

impl SendFile {
    /// Create new sendfile operation
    pub fn new(source_fd: u32, dest_socket: u32, offset: u64, count: usize) -> Self {
        Self {
            source_fd,
            dest_socket,
            offset,
            count,
        }
    }

    /// Execute transfer without copying to user space.
    ///
    /// For large transfers (>= 64KB), uses scatter-gather to read file data
    /// into DMA buffers and assemble once, reducing intermediate copies.
    /// For smaller transfers, falls back to a 4KB chunked copy through kernel
    /// buffers.
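    ///
    /// # Example
    ///
    /// Sketch of the intended call site (the descriptor numbers are
    /// illustrative; in practice they come from the caller's file table):
    ///
    /// ```ignore
    /// // Stream 1 MB from fd 3 to socket fd 4, starting at file offset 0.
    /// let sent = SendFile::new(3, 4, 0, 1024 * 1024).execute()?;
    /// ```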
    pub fn execute(&self) -> Result<usize, KernelError> {
        let proc = crate::process::current_process().ok_or(KernelError::InvalidState {
            expected: "running process",
            actual: "no current process",
        })?;
        let ft = proc.file_table.lock();
        let source_file = ft
            .get(self.source_fd as usize)
            .ok_or(KernelError::FsError(crate::error::FsError::NotFound))?;
        let dest_file = ft
            .get(self.dest_socket as usize)
            .ok_or(KernelError::FsError(crate::error::FsError::NotFound))?;

        // NOTE: seeking `source_file` to `self.offset` is not yet implemented;
        // the offset is currently recorded but unused.
        let _ = self.offset;

        // For large transfers, attempt scatter-gather path
        if self.count >= 65536 {
            if let Ok(transferred) = self.execute_sg(&source_file, &dest_file) {
                ZERO_COPY_STATS.record_zero_copy(transferred as u64);
                return Ok(transferred);
            }
            // SG path failed, fall through to copy path
        }

        // Fallback: transfer in 4 KB chunks to avoid large stack allocations
        let mut transferred = 0usize;
        let mut buf = [0u8; 4096];

        while transferred < self.count {
            let chunk = core::cmp::min(buf.len(), self.count - transferred);
            let n = source_file.read(&mut buf[..chunk])?;
            if n == 0 {
                break; // EOF
            }
            let written = dest_file.write(&buf[..n])?;
            transferred += written;
            if written == 0 {
                break;
            }
        }

        ZERO_COPY_STATS.record_copy(transferred as u64);
        Ok(transferred)
    }

    /// Scatter-gather sendfile path.
    ///
    /// Reads data from the source file into page-sized DMA buffers, adds them
    /// to a scatter-gather list, then writes the assembled data to the
    /// destination. This reduces copies compared to the 4KB loop by reading
    /// into pre-allocated DMA buffers and assembling once.
    fn execute_sg(
        &self,
        source: &crate::fs::file::File,
        dest: &crate::fs::file::File,
    ) -> Result<usize, KernelError> {
        let mut sg = ScatterGatherList::new();
        let mut dma_buffers: Vec<DmaBuffer> = Vec::new();
        let mut total_read = 0usize;

        // Read source data into DMA buffers, building the SG list
        while total_read < self.count {
            let mut dma_buf = DmaBuffer::new(FRAME_SIZE)?;
            let to_read = core::cmp::min(FRAME_SIZE, self.count - total_read);
            let n = source.read(&mut dma_buf.as_mut_slice()[..to_read])?;
            if n == 0 {
                break; // EOF
            }

            sg.add_segment(dma_buf.physical_addr, n);
            total_read += n;
            dma_buffers.push(dma_buf);
        }

        if total_read == 0 {
            return Ok(0);
        }

        // Assemble and write to destination
        let assembled = sg.assemble()?;
        let mut written_total = 0usize;
        let mut write_offset = 0usize;

        while write_offset < assembled.len() {
            let n = dest.write(&assembled[write_offset..])?;
            if n == 0 {
                break;
            }
            write_offset += n;
            written_total += n;
        }

        // NOTE: dropping `dma_buffers` releases the DmaBuffer handles, but the
        // underlying physical frames currently leak: DmaBuffer does not yet
        // implement Drop to return frames to the allocator. This matches the
        // pool-based pattern in dma_pool.rs.

        Ok(written_total)
    }
}

/// TCP Cork (batch small writes into single packet)
///
/// Buffers small writes and flushes them as a single TCP segment when the
/// buffer exceeds `max_pending` bytes or when `flush()` is called explicitly.
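///
/// # Example
///
/// Sketch of corked writes (the socket ID and address are illustrative):
///
/// ```ignore
/// let mut cork = TcpCork::with_socket(1460, sock_id, remote);
/// cork.write(b"HTTP/1.1 200 OK\r\n")?; // buffered, not yet sent
/// cork.write(b"Content-Length: 5\r\n\r\n")?;
/// cork.write(b"hello")?;
/// cork.flush()?;                       // one TCP segment instead of three
/// ```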
pub struct TcpCork {
    /// Pending data
    pending: Vec<u8>,
    /// Maximum pending size before flush
    max_pending: usize,
    /// Associated socket ID for TCP transmission
    socket_id: Option<usize>,
    /// Remote address for TCP transmission
    remote: Option<crate::net::SocketAddr>,
}

impl TcpCork {
    /// Create new TCP cork
    pub fn new(max_pending: usize) -> Self {
        Self {
            pending: Vec::new(),
            max_pending,
            socket_id: None,
            remote: None,
        }
    }

    /// Create a TCP cork bound to a specific socket
    pub fn with_socket(
        max_pending: usize,
        socket_id: usize,
        remote: crate::net::SocketAddr,
    ) -> Self {
        Self {
            pending: Vec::new(),
            max_pending,
            socket_id: Some(socket_id),
            remote: Some(remote),
        }
    }

    /// Add data (may not send immediately)
    pub fn write(&mut self, data: &[u8]) -> Result<(), KernelError> {
        self.pending.extend_from_slice(data);

        if self.pending.len() >= self.max_pending {
            self.flush()?;
        }

        Ok(())
    }

    /// Get the current pending data size
    pub fn pending_len(&self) -> usize {
        self.pending.len()
    }

    /// Force send pending data via TCP.
    ///
    /// If a socket ID and remote address are configured, sends through the TCP
    /// stack using `tcp::transmit_data()`. Otherwise, clears the buffer (useful
    /// for testing or when the cork is used standalone).
    pub fn flush(&mut self) -> Result<(), KernelError> {
        if self.pending.is_empty() {
            return Ok(());
        }

        if let (Some(socket_id), Some(remote)) = (self.socket_id, self.remote) {
            // Send through the TCP stack
            crate::net::tcp::transmit_data(socket_id, &self.pending, remote);
            ZERO_COPY_STATS.record_copy(self.pending.len() as u64);
        }
        // If no socket configured, just clear (standalone / test mode)

        self.pending.clear();
        Ok(())
    }
}

/// TCP zero-copy send combining scatter-gather with TCP segmentation.
///
/// Collects data into a scatter-gather list and segments it into TCP MSS-sized
/// chunks for transmission, avoiding intermediate copies where possible.
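///
/// # Example
///
/// Sketch of a segmented send (socket ID, address, and buffer names are
/// illustrative):
///
/// ```ignore
/// let mut send = TcpZeroCopySend::new(sock_id, remote);
/// send.set_mss(1400);                     // e.g. after path MTU discovery
/// send.add_buffer(hdr_phys, 128);         // kernel buffer
/// send.add_user_buffer(user_payload, 64 * 1024)?;
/// let sent = send.execute()?;             // TCP stack segments the data
/// ```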
pub struct TcpZeroCopySend {
    /// Scatter-gather list of data to send
    sg_list: ScatterGatherList,
    /// Socket ID for the TCP connection
    socket_id: usize,
    /// Remote address
    remote: crate::net::SocketAddr,
    /// Maximum segment size (typically 1460 for Ethernet)
    mss: usize,
}

impl TcpZeroCopySend {
    /// TCP Maximum Segment Size for Ethernet (1500 MTU - 20 IP - 20 TCP)
    const DEFAULT_MSS: usize = 1460;

    /// Create a new TCP zero-copy send operation.
    pub fn new(socket_id: usize, remote: crate::net::SocketAddr) -> Self {
        Self {
            sg_list: ScatterGatherList::new(),
            socket_id,
            remote,
            mss: Self::DEFAULT_MSS,
        }
    }

    /// Set custom MSS (for path MTU discovery)
    pub fn set_mss(&mut self, mss: usize) {
        self.mss = mss;
    }

    /// Add data from a kernel buffer (physical address)
    pub fn add_buffer(&mut self, phys_addr: u64, length: usize) {
        self.sg_list.add_segment(phys_addr, length);
    }

    /// Add data from a user buffer (translates virtual to physical)
    pub fn add_user_buffer(&mut self, user_addr: u64, length: usize) -> Result<(), KernelError> {
        // Reuse the page-pinning logic from ZeroCopySend
        let mut zc_send = ZeroCopySend::new();
        zc_send.add_user_buffer(user_addr, length)?;

        // Move the translated segments into our SG list
        for seg in zc_send.sg_list.segments() {
            self.sg_list.add_segment(seg.physical_addr, seg.length);
        }
        Ok(())
    }

    /// Execute the zero-copy TCP send.
    ///
    /// Assembles the scatter-gather data and sends it through the TCP stack,
    /// which handles segmentation into MSS-sized chunks, sequence numbers,
    /// and retransmission.
    pub fn execute(&self) -> Result<usize, KernelError> {
        if self.sg_list.is_empty() {
            return Ok(0);
        }

        let total_len = self.sg_list.total_length();

        // Assemble the SG list into contiguous data
        let data = self.sg_list.assemble()?;

        // Send through TCP stack which handles segmentation
        crate::net::tcp::transmit_data(self.socket_id, &data, self.remote);

        ZERO_COPY_STATS.record_zero_copy(total_len as u64);
        Ok(total_len)
    }

    /// Get total data size queued for sending
    pub fn total_length(&self) -> usize {
        self.sg_list.total_length()
    }

    /// Get the number of scatter-gather segments
    pub fn segment_count(&self) -> usize {
        self.sg_list.segment_count()
    }
}

/// Statistics for zero-copy operations
pub struct ZeroCopyStats {
    /// Total bytes transferred without copying
    pub zero_copy_bytes: AtomicU64,
    /// Total bytes that required copying (fallback)
    pub copied_bytes: AtomicU64,
    /// Number of zero-copy operations
    pub zero_copy_ops: AtomicU64,
    /// Number of copy operations
    pub copy_ops: AtomicU64,
}

impl ZeroCopyStats {
    pub const fn new() -> Self {
        Self {
            zero_copy_bytes: AtomicU64::new(0),
            copied_bytes: AtomicU64::new(0),
            zero_copy_ops: AtomicU64::new(0),
            copy_ops: AtomicU64::new(0),
        }
    }

    pub fn record_zero_copy(&self, bytes: u64) {
        self.zero_copy_bytes.fetch_add(bytes, Ordering::Relaxed);
        self.zero_copy_ops.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_copy(&self, bytes: u64) {
        self.copied_bytes.fetch_add(bytes, Ordering::Relaxed);
        self.copy_ops.fetch_add(1, Ordering::Relaxed);
    }

    /// Percentage of bytes moved without copying (0.0 when nothing recorded).
    pub fn get_efficiency(&self) -> f64 {
        let zc = self.zero_copy_bytes.load(Ordering::Relaxed) as f64;
        let cp = self.copied_bytes.load(Ordering::Relaxed) as f64;

        if zc + cp == 0.0 {
            return 0.0;
        }

        (zc / (zc + cp)) * 100.0
    }
}

impl Default for ZeroCopyStats {
    fn default() -> Self {
        Self::new()
    }
}

/// Global zero-copy statistics
pub(crate) static ZERO_COPY_STATS: ZeroCopyStats = ZeroCopyStats::new();

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dma_buffer_pool() {
        let pool = DmaBufferPool::new(2048, 10);
        let stats = pool.stats();

        // On the host test target the frame allocator is not initialized,
        // so DmaBuffer::new() silently fails and total_buffers stays 0.
        // Only assert on the bare-metal target where frames are available.
        #[cfg(target_os = "none")]
        {
            assert_eq!(stats.total_buffers, 10);
            assert_eq!(stats.in_use, 0);

            let buf = pool.alloc();
            assert!(buf.is_some());

            let stats = pool.stats();
            assert_eq!(stats.in_use, 1);
        }
        #[cfg(not(target_os = "none"))]
        {
            // Pool constructed successfully; pre-allocation requires kernel infra
            assert_eq!(stats.in_use, 0);
        }
    }

    #[test]
    fn test_scatter_gather() {
        let mut sg = ScatterGatherList::new();
        assert!(sg.is_empty());
        sg.add_segment(0x1000, 512);
        sg.add_segment(0x2000, 1024);

        assert!(!sg.is_empty());
        assert_eq!(sg.total_length(), 1536);
        assert_eq!(sg.segments().len(), 2);
        assert_eq!(sg.segment_count(), 2);
    }

    #[test]
    fn test_zero_copy_stats() {
        let stats = ZeroCopyStats::new();
        stats.record_zero_copy(1000);
        stats.record_copy(100);

        let efficiency = stats.get_efficiency();
        assert!(efficiency > 90.0); // 1000/(1000+100) = 90.9%
    }

    #[test]
    fn test_tcp_cork_basic() {
        let mut cork = TcpCork::new(100);
        assert_eq!(cork.pending_len(), 0);

        cork.write(b"hello").unwrap();
        assert_eq!(cork.pending_len(), 5);

        cork.flush().unwrap();
        assert_eq!(cork.pending_len(), 0);
    }
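
    // Exercises the auto-flush threshold in TcpCork::write(). With no socket
    // configured, flush() just clears the pending buffer, so this covers only
    // the corking logic itself.
    #[test]
    fn test_tcp_cork_auto_flush() {
        let mut cork = TcpCork::new(4);
        // 5 bytes >= max_pending (4), so write() flushes automatically.
        cork.write(b"hello").unwrap();
        assert_eq!(cork.pending_len(), 0);
    }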

    #[test]
    fn test_zero_copy_send_empty() {
        let send = ZeroCopySend::new();
        assert!(send.sg_list().is_empty());
    }
}