⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. All hardware and raw-pointer accesses are unsafe unless explicitly marked otherwise.

veridian_kernel/drivers/virtio_net.rs

//! VirtIO Network Driver
//!
//! Driver for paravirtualized network devices using the VirtIO protocol.
//! Commonly used in QEMU/KVM virtual machines for high performance.
//!
//! Implements the VirtIO MMIO transport with proper status negotiation,
//! virtqueue setup via frame allocator DMA buffers, and TX/RX paths.
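//!
//! # Example
//!
//! A minimal polling sketch. The MMIO base address below is illustrative
//! only; a real kernel discovers it from the device tree or a PCI BAR:
//!
//! ```ignore
//! let mut nic = VirtioNetDriver::new(0x1000_1000)?;
//! nic.transmit(&[0u8; 64])?; // send one (dummy) 64-byte frame
//! while let Some(pkt) = nic.receive()? {
//!     // hand `pkt` to the network stack
//! }
//! ```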

// Allow dead code for VirtIO feature bits and structures not yet fully implemented
#![allow(dead_code, clippy::needless_range_loop)]

use alloc::vec::Vec;

use crate::{
    error::KernelError,
    net::{
        device::{DeviceCapabilities, DeviceState, DeviceStatistics, NetworkDevice},
        MacAddress, Packet,
    },
};

/// VirtIO Network Device Feature Bits
const VIRTIO_NET_F_CSUM: u64 = 1 << 0;
const VIRTIO_NET_F_GUEST_CSUM: u64 = 1 << 1;
const VIRTIO_NET_F_MAC: u64 = 1 << 5;
const VIRTIO_NET_F_STATUS: u64 = 1 << 16;
/// Transport feature bit: device and driver conform to VirtIO 1.0+.
/// Required by the modern MMIO register layout used below.
const VIRTIO_F_VERSION_1: u64 = 1 << 32;

// ============================================================================
// VirtIO MMIO Register Offsets (modern interface, version 2)
// ============================================================================
const VIRTIO_MMIO_MAGIC: usize = 0x00;
const VIRTIO_MMIO_VERSION: usize = 0x04;
const VIRTIO_MMIO_DEVICE_ID: usize = 0x08;
const VIRTIO_MMIO_DEVICE_FEATURES: usize = 0x10;
const VIRTIO_MMIO_DEVICE_FEATURES_SEL: usize = 0x14;
const VIRTIO_MMIO_DRIVER_FEATURES: usize = 0x20;
const VIRTIO_MMIO_DRIVER_FEATURES_SEL: usize = 0x24;
const VIRTIO_MMIO_QUEUE_SEL: usize = 0x30;
const VIRTIO_MMIO_QUEUE_NUM_MAX: usize = 0x34;
const VIRTIO_MMIO_QUEUE_NUM: usize = 0x38;
const VIRTIO_MMIO_QUEUE_READY: usize = 0x44;
const VIRTIO_MMIO_QUEUE_NOTIFY: usize = 0x50;
const VIRTIO_MMIO_STATUS: usize = 0x70;
const VIRTIO_MMIO_QUEUE_DESC_LOW: usize = 0x80;
const VIRTIO_MMIO_QUEUE_DESC_HIGH: usize = 0x84;
const VIRTIO_MMIO_QUEUE_AVAIL_LOW: usize = 0x90;
const VIRTIO_MMIO_QUEUE_AVAIL_HIGH: usize = 0x94;
const VIRTIO_MMIO_QUEUE_USED_LOW: usize = 0xA0;
const VIRTIO_MMIO_QUEUE_USED_HIGH: usize = 0xA4;
const VIRTIO_MMIO_CONFIG_BASE: usize = 0x100;

// VirtIO status bits
const VIRTIO_STATUS_ACKNOWLEDGE: u32 = 1;
const VIRTIO_STATUS_DRIVER: u32 = 2;
const VIRTIO_STATUS_DRIVER_OK: u32 = 4;
const VIRTIO_STATUS_FEATURES_OK: u32 = 8;

/// VirtIO Net header size. With VIRTIO_F_VERSION_1 negotiated the header
/// always carries the `num_buffers` field, so it is 12 bytes (it would be
/// 10 on the legacy interface without MRG_RXBUF).
const VIRTIO_NET_HDR_SIZE: usize = 12;

/// Descriptor flags: buffer is device-writable (for RX buffers)
const VIRTQ_DESC_F_WRITE: u16 = 2;

/// VirtIO Network Header, prepended to every frame on the wire
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioNetHeader {
    flags: u8,
    gso_type: u8,
    hdr_len: u16,
    gso_size: u16,
    csum_start: u16,
    csum_offset: u16,
    /// Present whenever VIRTIO_F_VERSION_1 is negotiated; only meaningful
    /// with mergeable RX buffers (zeroed by this driver)
    num_buffers: u16,
}

/// VirtIO Ring Descriptor
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqDesc {
    addr: u64,
    len: u32,
    flags: u16,
    next: u16,
}

/// VirtIO Ring Available
///
/// NOTE: the fixed 256-entry ring (and thus the position of `used_event`)
/// assumes the negotiated queue size is exactly 256; see `allocate_virtqueue`.
#[repr(C)]
struct VirtqAvail {
    flags: u16,
    idx: u16,
    ring: [u16; 256],
    used_event: u16,
}

/// VirtIO Ring Used Element
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqUsedElem {
    id: u32,
    len: u32,
}

/// VirtIO Ring Used
///
/// NOTE: like VirtqAvail, the fixed 256-entry ring assumes a queue size of
/// exactly 256.
#[repr(C)]
struct VirtqUsed {
    flags: u16,
    idx: u16,
    ring: [VirtqUsedElem; 256],
    avail_event: u16,
}

/// VirtIO Virtqueue
struct Virtqueue {
    /// Queue size (number of descriptors)
    size: u16,

    /// Descriptor table
    descriptors: &'static mut [VirtqDesc],

    /// Available ring
    avail: &'static mut VirtqAvail,

    /// Used ring
    used: &'static mut VirtqUsed,

    /// Free descriptor head
    free_head: u16,

    /// Last seen used index
    last_used_idx: u16,

    /// Number of free descriptors
    num_free: u16,
}

impl Virtqueue {
    /// Create a new virtqueue (requires pre-allocated, zeroed ring memory)
    fn new(
        descriptors: &'static mut [VirtqDesc],
        avail: &'static mut VirtqAvail,
        used: &'static mut VirtqUsed,
        size: u16,
    ) -> Self {
        // Chain the descriptors into a free list (the last entry wraps to 0;
        // `num_free` prevents reuse of in-flight descriptors)
        for i in 0..size {
            descriptors[i as usize].next = if i + 1 < size { i + 1 } else { 0 };
        }

        // Initialize rings
        avail.flags = 0;
        avail.idx = 0;
        used.flags = 0;
        used.idx = 0;

        Self {
            size,
            descriptors,
            avail,
            used,
            free_head: 0,
            last_used_idx: 0,
            num_free: size,
        }
    }

    /// Allocate a descriptor from the free list
    fn alloc_desc(&mut self) -> Option<u16> {
        if self.num_free == 0 {
            return None;
        }

        let desc_idx = self.free_head;
        self.free_head = self.descriptors[desc_idx as usize].next;
        self.num_free -= 1;

        Some(desc_idx)
    }

    /// Return a descriptor to the free list
    fn free_desc(&mut self, desc_idx: u16) {
        self.descriptors[desc_idx as usize].next = self.free_head;
        self.free_head = desc_idx;
        self.num_free += 1;
    }

    /// Add buffer to available ring
    fn add_to_avail(&mut self, desc_idx: u16) {
        let avail_idx = self.avail.idx as usize % self.size as usize;
        self.avail.ring[avail_idx] = desc_idx;

        // Make the ring entry visible to the device before publishing the
        // incremented avail index
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);

        self.avail.idx = self.avail.idx.wrapping_add(1);
    }

    /// Check for used buffers, returning (descriptor index, written length)
    fn get_used(&mut self) -> Option<(u16, u32)> {
        // The device writes `used.idx` via DMA; read it volatilely so the
        // compiler cannot cache the value across polls.
        // SAFETY: `self.used` is a live, valid reference to the used ring.
        let device_idx = unsafe { core::ptr::read_volatile(&self.used.idx) };
        if self.last_used_idx == device_idx {
            return None;
        }

        // Pair with the device's write: observe the ring element only after
        // observing the index update
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);

        let used_idx = self.last_used_idx as usize % self.size as usize;
        let used_elem = self.used.ring[used_idx];

        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Some((used_elem.id as u16, used_elem.len))
    }
}

/// DMA buffer region backing a virtqueue.
///
/// Stores the virtual addresses of frame-allocator-provided pages
/// used for the descriptor table, available ring, and used ring.
struct VirtqueueDmaRegion {
    /// Virtual address of allocated pages (for the desc/avail/used rings)
    virt_addr: usize,
    /// Number of 4KB pages allocated
    num_pages: usize,
}

/// Per-descriptor TX/RX data buffer (single 4KB page).
struct DataBuffer {
    virt_addr: usize,
    phys_addr: u64,
}

/// VirtIO Network Driver
pub struct VirtioNetDriver {
    mmio_base: usize,
    mac_address: MacAddress,
    features: u64,
    rx_queue_size: u16,
    tx_queue_size: u16,
    state: DeviceState,
    stats: DeviceStatistics,

    // Virtqueues (None until initialized)
    rx_queue: Option<Virtqueue>,
    tx_queue: Option<Virtqueue>,

    /// DMA region backing the RX virtqueue rings
    rx_dma: Option<VirtqueueDmaRegion>,
    /// DMA region backing the TX virtqueue rings
    tx_dma: Option<VirtqueueDmaRegion>,
    /// Per-descriptor data buffers for RX
    rx_buffers: Vec<DataBuffer>,
    /// Per-descriptor data buffers for TX
    tx_buffers: Vec<DataBuffer>,
}

impl VirtioNetDriver {
    /// Create a new VirtIO Network driver
    pub fn new(mmio_base: usize) -> Result<Self, KernelError> {
        let mut driver = Self {
            mmio_base,
            mac_address: MacAddress::ZERO,
            features: 0,
            rx_queue_size: 256,
            tx_queue_size: 256,
            state: DeviceState::Down,
            stats: DeviceStatistics::default(),
            rx_queue: None,
            tx_queue: None,
            rx_dma: None,
            tx_dma: None,
            rx_buffers: Vec::new(),
            tx_buffers: Vec::new(),
        };

        driver.initialize()?;
        Ok(driver)
    }

    /// Read from MMIO register
    fn read_reg(&self, offset: usize) -> u32 {
        // SAFETY: Reading a VirtIO MMIO register at mmio_base + offset. The mmio_base
        // is the device's memory-mapped I/O base from the device tree or PCI BAR.
        // read_volatile prevents compiler reordering of hardware register accesses.
        unsafe { core::ptr::read_volatile((self.mmio_base + offset) as *const u32) }
    }

    /// Write to MMIO register
    fn write_reg(&self, offset: usize, value: u32) {
        // SAFETY: Writing a VirtIO MMIO register. Same invariants as read_reg.
        unsafe {
            core::ptr::write_volatile((self.mmio_base + offset) as *mut u32, value);
        }
    }

    /// Initialize VirtIO device with full status negotiation and virtqueue
    /// setup.
    ///
    /// Follows the VirtIO 1.0+ initialization sequence:
    /// 0. Verify transport magic, version, and device ID
    /// 1. Reset device
    /// 2. Set ACKNOWLEDGE
    /// 3. Set DRIVER
    /// 4. Negotiate features
    /// 5. Set FEATURES_OK and verify
    /// 6. Set up virtqueues (RX queue 0, TX queue 1)
    /// 7. Read MAC from device config
    /// 8. Set DRIVER_OK
    fn initialize(&mut self) -> Result<(), KernelError> {
        // Step 0: Sanity-check the transport. The magic register must read
        // "virt" (0x7472_6976), version 2 is the modern register layout used
        // here, and device ID 1 identifies a network card.
        if self.read_reg(VIRTIO_MMIO_MAGIC) != 0x7472_6976
            || self.read_reg(VIRTIO_MMIO_VERSION) != 2
            || self.read_reg(VIRTIO_MMIO_DEVICE_ID) != 1
        {
            return Err(KernelError::HardwareError {
                device: "virtio-net",
                code: 0,
            });
        }

        // Step 1: Reset device
        self.write_reg(VIRTIO_MMIO_STATUS, 0);

        // Step 2: Set ACKNOWLEDGE status bit
        self.write_reg(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE);

        // Step 3: Set DRIVER status bit
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER,
        );

        // Step 4: Read device features and offer back the subset we support.
        // VIRTIO_F_VERSION_1 is mandatory on the modern transport.
        self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
        let features_low = self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64;
        self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
        let features_high = (self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64) << 32;
        self.features = features_low | features_high;

        let driver_features = VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_F_VERSION_1;
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
        self.write_reg(
            VIRTIO_MMIO_DRIVER_FEATURES,
            (driver_features & 0xFFFF_FFFF) as u32,
        );
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES, (driver_features >> 32) as u32);

        // Step 5: Set FEATURES_OK and verify the device accepted our features
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK,
        );

        if (self.read_reg(VIRTIO_MMIO_STATUS) & VIRTIO_STATUS_FEATURES_OK) == 0 {
            return Err(KernelError::HardwareError {
                device: "virtio-net",
                code: 1,
            });
        }

        // Step 6: Set up virtqueues
        // RX queue = index 0, TX queue = index 1
        self.setup_rx_queue()?;
        self.setup_tx_queue()?;

        // Step 7: Read MAC address from device config space. Config space is
        // read byte-wise; 32-bit reads at unaligned offsets are not portable.
        if (self.features & VIRTIO_NET_F_MAC) != 0 {
            let mut mac = [0u8; 6];
            for (i, byte) in mac.iter_mut().enumerate() {
                // SAFETY: MMIO config-space read; same invariants as read_reg.
                *byte = unsafe {
                    core::ptr::read_volatile(
                        (self.mmio_base + VIRTIO_MMIO_CONFIG_BASE + i) as *const u8,
                    )
                };
            }
            self.mac_address = MacAddress(mac);
        }

        // Step 8: Set DRIVER_OK -- device is live
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE
                | VIRTIO_STATUS_DRIVER
                | VIRTIO_STATUS_FEATURES_OK
                | VIRTIO_STATUS_DRIVER_OK,
        );

        println!(
            "[VIRTIO-NET] Initialized with MAC: {:02X}:{:02X}:{:02X}:{:02X}:{:02X}:{:02X}",
            self.mac_address.0[0],
            self.mac_address.0[1],
            self.mac_address.0[2],
            self.mac_address.0[3],
            self.mac_address.0[4],
            self.mac_address.0[5]
        );
        println!(
            "[VIRTIO-NET] RX queue: {} descs, TX queue: {} descs",
            self.rx_queue_size, self.tx_queue_size
        );

        self.state = DeviceState::Up;
        Ok(())
    }

    /// Set up the RX virtqueue (queue index 0).
    ///
    /// Reads QueueNumMax from MMIO, allocates descriptor/available/used ring
    /// memory, and pre-populates the available ring with receive buffers.
    fn setup_rx_queue(&mut self) -> Result<(), KernelError> {
        self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 0); // Select queue 0

        let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
        if max_size == 0 {
            return Err(KernelError::HardwareError {
                device: "virtio-net",
                code: 2,
            });
        }
        let queue_size = max_size.min(256);
        self.rx_queue_size = queue_size;

        // Allocate ring memory and data buffers, then create the Virtqueue
        let (vq, dma, buffers) = self.allocate_virtqueue(queue_size, true)?;

        // Tell the device the ring addresses. The virtual address stands in
        // for the physical one; this holds only for identity (or fixed-offset)
        // mapped kernel memory.
        let desc_phys = dma.virt_addr as u64;
        let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
        // The used ring must be 4-byte aligned; pad to match allocate_virtqueue
        let used_offset = (avail_offset + 6 + 2 * (queue_size as usize)).next_multiple_of(4);
        let avail_phys = desc_phys + avail_offset as u64;
        let used_phys = desc_phys + used_offset as u64;

        self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);

        self.rx_queue = Some(vq);
        self.rx_dma = Some(dma);
        self.rx_buffers = buffers;

        Ok(())
    }

    /// Set up the TX virtqueue (queue index 1).
    fn setup_tx_queue(&mut self) -> Result<(), KernelError> {
        self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 1); // Select queue 1

        let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
        if max_size == 0 {
            return Err(KernelError::HardwareError {
                device: "virtio-net",
                code: 3,
            });
        }
        let queue_size = max_size.min(256);
        self.tx_queue_size = queue_size;

        let (vq, dma, buffers) = self.allocate_virtqueue(queue_size, false)?;

        let desc_phys = dma.virt_addr as u64; // Identity/offset mapping assumed
        let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
        // Pad so the used ring is 4-byte aligned, matching allocate_virtqueue
        let used_offset = (avail_offset + 6 + 2 * (queue_size as usize)).next_multiple_of(4);
        let avail_phys = desc_phys + avail_offset as u64;
        let used_phys = desc_phys + used_offset as u64;

        self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);

        self.tx_queue = Some(vq);
        self.tx_dma = Some(dma);
        self.tx_buffers = buffers;

        Ok(())
    }

    /// Allocate a virtqueue: ring memory + per-descriptor data buffers.
    ///
    /// For RX queues (`is_rx = true`), each descriptor is pre-configured to
    /// point at a writable data buffer and added to the available ring so
    /// the device can fill them with received packets.
    fn allocate_virtqueue(
        &self,
        queue_size: u16,
        is_rx: bool,
    ) -> Result<(Virtqueue, VirtqueueDmaRegion, Vec<DataBuffer>), KernelError> {
        let qs = queue_size as usize;

        // Ring memory layout (per the VirtIO spec, with required alignment):
        //   descriptors: qs * 16 bytes            (16-byte aligned)
        //   avail ring:  2+2 + qs*2 + 2 bytes     (2-byte aligned)
        //   used ring:   2+2 + qs*8 + 2 bytes     (4-byte aligned)
        let desc_size = qs * core::mem::size_of::<VirtqDesc>();
        let avail_size = 6 + 2 * qs;
        let used_size = 6 + 8 * qs;
        // Pad so the used ring lands on a 4-byte boundary
        let used_offset = (desc_size + avail_size).next_multiple_of(4);
        let total_ring_bytes = used_offset + used_size;
        let ring_pages = total_ring_bytes.div_ceil(4096);

        // Allocate page-aligned, zeroed memory for the ring structures and
        // leak it so it lives for 'static (the device holds the addresses).
        // In a full implementation this would come from the frame allocator
        // as physically contiguous DMA memory.
        let layout = core::alloc::Layout::from_size_align(ring_pages * 4096, 4096)
            .expect("ring layout is page-sized and page-aligned");
        // SAFETY: `layout` has non-zero size.
        let ring_ptr = unsafe { alloc::alloc::alloc_zeroed(layout) } as usize;
        if ring_ptr == 0 {
            return Err(KernelError::ResourceExhausted {
                resource: "virtqueue_ring_memory",
            });
        }

        // Carve out descriptor table, avail ring, used ring
        let desc_ptr = ring_ptr as *mut VirtqDesc;
        let avail_ptr = (ring_ptr + desc_size) as *mut VirtqAvail;
        let used_ptr = (ring_ptr + used_offset) as *mut VirtqUsed;

        // SAFETY: The pointers come from a just-allocated, zeroed, page-aligned
        // region that is large enough for all three structures, and the offsets
        // above satisfy each structure's alignment. The region is leaked, so
        // the 'static references never dangle.
        let descriptors = unsafe { core::slice::from_raw_parts_mut(desc_ptr, qs) };
        let avail = unsafe { &mut *avail_ptr };
        let used = unsafe { &mut *used_ptr };

        let mut vq = Virtqueue::new(descriptors, avail, used, queue_size);

        // Allocate per-descriptor data buffers (one 4KB page each)
        let mut data_buffers = Vec::with_capacity(qs);
        for _ in 0..qs {
            let buf = alloc::vec![0u8; 4096];
            let buf_virt = buf.as_ptr() as usize;
            let buf_phys = buf_virt as u64; // Approximate; correct for identity/offset mapping
            core::mem::forget(buf);
            data_buffers.push(DataBuffer {
                virt_addr: buf_virt,
                phys_addr: buf_phys,
            });
        }

        // For RX: point each descriptor at its data buffer and hand the whole
        // ring to the device up front. Go through `vq` so we never create a
        // second, aliasing &mut to the ring memory. The descriptors stay on
        // the free list; the RX path recycles them by index via the used ring
        // and never calls alloc_desc().
        if is_rx {
            for i in 0..qs {
                vq.descriptors[i].addr = data_buffers[i].phys_addr;
                vq.descriptors[i].len = 4096;
                vq.descriptors[i].flags = VIRTQ_DESC_F_WRITE; // Device-writable
                vq.descriptors[i].next = 0;
                vq.avail.ring[i] = i as u16;
            }
            vq.avail.idx = queue_size;
        }

        let dma = VirtqueueDmaRegion {
            virt_addr: ring_ptr,
            num_pages: ring_pages,
        };

        Ok((vq, dma, data_buffers))
    }

    /// Transmit a packet using virtqueue.
    ///
    /// Prepends a VirtioNetHeader, copies the frame data into the
    /// pre-allocated TX data buffer, and kicks the device.
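    ///
    /// The buffer handed to the device is laid out as:
    ///
    /// ```text
    /// +-----------------------------+--------------------------+
    /// | VirtioNetHeader (12 bytes,  | Ethernet frame           |
    /// | zeroed: no offloads)        | packet.len() bytes       |
    /// +-----------------------------+--------------------------+
    /// ```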
    pub fn transmit(&mut self, packet: &[u8]) -> Result<(), KernelError> {
        if self.state != DeviceState::Up {
            return Err(KernelError::InvalidState {
                expected: "up",
                actual: "down",
            });
        }

        let total_len = VIRTIO_NET_HDR_SIZE + packet.len();
        if total_len > 4096 {
            return Err(KernelError::InvalidArgument {
                name: "packet_size",
                value: "too_large",
            });
        }

        if let Some(ref mut tx_queue) = self.tx_queue {
            let desc_idx = tx_queue
                .alloc_desc()
                .ok_or(KernelError::ResourceExhausted {
                    resource: "virtio_tx_descriptors",
                })?;

            // Copy VirtioNetHeader + frame data into the TX data buffer
            if (desc_idx as usize) < self.tx_buffers.len() {
                let buf_virt = self.tx_buffers[desc_idx as usize].virt_addr;
                let buf_phys = self.tx_buffers[desc_idx as usize].phys_addr;
                // SAFETY: buf_virt points to a leaked 4096-byte allocation.
                // total_len <= 4096 is checked above, and we hold &mut self,
                // so nothing else accesses this buffer concurrently.
                let buf_slice =
                    unsafe { core::slice::from_raw_parts_mut(buf_virt as *mut u8, 4096) };

                // Write zeroed VirtioNetHeader (no checksum/GSO offload)
                buf_slice[..VIRTIO_NET_HDR_SIZE].fill(0);
                // Write packet data after the header
                buf_slice[VIRTIO_NET_HDR_SIZE..total_len].copy_from_slice(packet);

                // Configure descriptor
                let desc = &mut tx_queue.descriptors[desc_idx as usize];
                desc.addr = buf_phys;
                desc.len = total_len as u32;
                desc.flags = 0; // Device-readable (TX direction)
            }

            tx_queue.add_to_avail(desc_idx);

            self.stats.tx_packets += 1;
            self.stats.tx_bytes += packet.len() as u64;

            // Poll-mode simplification: return the descriptor to the free
            // list immediately. A robust driver reclaims it from the used
            // ring (or in a TX completion interrupt); reusing it before the
            // device has consumed the buffer can clobber an in-flight frame.
            tx_queue.free_desc(desc_idx);
        } else {
            return Err(KernelError::HardwareError {
                device: "virtio-net",
                code: 0x01,
            });
        }

        // Kick the device (TX queue = index 1); the mutable borrow of
        // tx_queue has ended, so the shared MMIO helper is usable again
        self.write_reg(VIRTIO_MMIO_QUEUE_NOTIFY, 1);

        Ok(())
    }

    /// Receive a packet using virtqueue.
    ///
    /// Checks the used ring for completed RX buffers, copies the received
    /// frame data (after stripping the VirtioNetHeader), recycles the
    /// descriptor, and returns the packet.
    pub fn receive(&mut self) -> Result<Option<Packet>, KernelError> {
        if self.state != DeviceState::Up {
            return Ok(None);
        }

        let mmio = self.mmio_base;
        if let Some(ref mut rx_queue) = self.rx_queue {
            if let Some((desc_idx, len)) = rx_queue.get_used() {
                // Defensive clamp: never trust a device-reported length
                // beyond the 4096-byte buffer
                let total_len = (len as usize).min(4096);

                // Skip the VirtioNetHeader to get the actual frame data
                let data_offset = VIRTIO_NET_HDR_SIZE;

                let pkt = if (desc_idx as usize) < self.rx_buffers.len() && total_len > data_offset
                {
                    let buf_virt = self.rx_buffers[desc_idx as usize].virt_addr;
                    let frame_len = total_len - data_offset;

                    // SAFETY: buf_virt is a leaked 4096-byte allocation and
                    // total_len is clamped to 4096 above.
                    let buf_slice =
                        unsafe { core::slice::from_raw_parts(buf_virt as *const u8, 4096) };
                    let frame_data = &buf_slice[data_offset..data_offset + frame_len];

                    crate::net::Packet::from_bytes(frame_data)
                } else {
                    crate::net::Packet::new(0)
                };

                self.stats.rx_packets += 1;
                self.stats.rx_bytes += total_len as u64;

                // Recycle: reset descriptor and re-add to available ring
                let desc = &mut rx_queue.descriptors[desc_idx as usize];
                desc.len = 4096;
                desc.flags = VIRTQ_DESC_F_WRITE;
                rx_queue.add_to_avail(desc_idx);

                // Kick the device so it sees the recycled buffer
                // (RX queue = index 0).
                // SAFETY: same invariants as write_reg; a raw write is used
                // because rx_queue still mutably borrows self here.
                unsafe {
                    core::ptr::write_volatile((mmio + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32, 0);
                }

                Ok(Some(pkt))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// Notify device of available descriptors (kick virtqueue)
    fn notify_queue(&self, queue_idx: u16) {
        // For the MMIO transport the notify register is fixed at base + 0x50
        self.write_reg(VIRTIO_MMIO_QUEUE_NOTIFY, queue_idx as u32);
    }

    /// Get MAC address
    pub fn mac_address(&self) -> MacAddress {
        self.mac_address
    }
}

// DeviceDriver trait implementation removed - using NetworkDevice trait instead

impl NetworkDevice for VirtioNetDriver {
    fn name(&self) -> &str {
        "eth1"
    }

    fn mac_address(&self) -> MacAddress {
        self.mac_address
    }

    fn capabilities(&self) -> DeviceCapabilities {
        DeviceCapabilities {
            max_transmission_unit: 1500,
            supports_vlan: false,
            supports_checksum_offload: (self.features & VIRTIO_NET_F_CSUM) != 0,
            supports_tso: false,
            supports_lro: false,
        }
    }

    fn state(&self) -> DeviceState {
        self.state
    }

    fn set_state(&mut self, state: DeviceState) -> Result<(), KernelError> {
        match state {
            DeviceState::Up => {
                if self.state == DeviceState::Down {
                    // Re-assert the full status word, including DRIVER_OK
                    self.write_reg(
                        VIRTIO_MMIO_STATUS,
                        VIRTIO_STATUS_ACKNOWLEDGE
                            | VIRTIO_STATUS_DRIVER
                            | VIRTIO_STATUS_FEATURES_OK
                            | VIRTIO_STATUS_DRIVER_OK,
                    );
                }
                self.state = DeviceState::Up;
            }
            DeviceState::Down => {
                // Writing 0 to the status register resets the device
                self.write_reg(VIRTIO_MMIO_STATUS, 0);
                self.state = DeviceState::Down;
            }
            _ => {
                self.state = state;
            }
        }
        Ok(())
    }

    fn statistics(&self) -> DeviceStatistics {
        self.stats
    }

    fn transmit(&mut self, packet: &Packet) -> Result<(), KernelError> {
        if self.state != DeviceState::Up {
            self.stats.tx_dropped += 1;
            return Err(KernelError::InvalidState {
                expected: "up",
                actual: "not_up",
            });
        }

        // Delegate to the real virtqueue-based transmit
        VirtioNetDriver::transmit(self, packet.data())
    }

    fn receive(&mut self) -> Result<Option<Packet>, KernelError> {
        // Delegate to the real virtqueue-based receive
        VirtioNetDriver::receive(self)
    }
}

/// Initialize VirtIO-Net driver
pub fn init() -> Result<(), KernelError> {
    println!("[VIRTIO-NET] VirtIO Network driver module loaded");
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_virtio_constants() {
        assert_eq!(VIRTIO_NET_F_MAC, 1 << 5);
        assert_eq!(VIRTIO_NET_F_STATUS, 1 << 16);
    }
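
    /// Sanity-check Virtqueue descriptor bookkeeping. This test is an
    /// illustrative addition: it leaks a tiny 4-entry ring (mirroring the
    /// driver's leak-based allocation) to obtain the 'static references
    /// Virtqueue::new requires.
    #[test]
    fn test_desc_alloc_free() {
        use alloc::boxed::Box;

        let descs: &'static mut [VirtqDesc] = Box::leak(Box::new(
            [VirtqDesc { addr: 0, len: 0, flags: 0, next: 0 }; 4],
        ));
        let avail = Box::leak(Box::new(VirtqAvail {
            flags: 0,
            idx: 0,
            ring: [0; 256],
            used_event: 0,
        }));
        let used = Box::leak(Box::new(VirtqUsed {
            flags: 0,
            idx: 0,
            ring: [VirtqUsedElem { id: 0, len: 0 }; 256],
            avail_event: 0,
        }));

        let mut vq = Virtqueue::new(descs, avail, used, 4);

        // Drain the free list, then confirm exhaustion and reuse
        let a = vq.alloc_desc().unwrap();
        for _ in 0..3 {
            vq.alloc_desc().unwrap();
        }
        assert!(vq.alloc_desc().is_none());
        vq.free_desc(a);
        assert_eq!(vq.alloc_desc(), Some(a));
    }

    /// Verify the ring-size arithmetic used by allocate_virtqueue against the
    /// actual struct layouts, for the 256-entry case the fixed-size ring
    /// structs assume. Also an illustrative addition.
    #[test]
    fn test_ring_layout_math() {
        let qs = 256usize;
        let desc_size = qs * core::mem::size_of::<VirtqDesc>();
        assert_eq!(desc_size, 4096);

        // avail: flags + idx + ring + used_event = 6 + 2*qs bytes
        let avail_size = 6 + 2 * qs;
        assert_eq!(avail_size, core::mem::size_of::<VirtqAvail>());

        // used: flags + idx + ring + avail_event = 6 + 8*qs bytes of
        // device-visible data; the Rust struct adds tail padding for its
        // 4-byte alignment, so size_of is only an upper bound
        let used_size = 6 + 8 * qs;
        assert!(core::mem::size_of::<VirtqUsed>() >= used_size);

        // The used ring must start on a 4-byte boundary
        let used_offset = (desc_size + avail_size).next_multiple_of(4);
        assert_eq!(used_offset % 4, 0);
    }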
}