1#![allow(dead_code, clippy::needless_range_loop)]
11
12use alloc::vec::Vec;
13
14use crate::{
15 error::KernelError,
16 net::{
17 device::{DeviceCapabilities, DeviceState, DeviceStatistics, NetworkDevice},
18 MacAddress, Packet,
19 },
20};
21
// Virtio-net feature bits (64-bit device/driver feature word).
const VIRTIO_NET_F_CSUM: u64 = 1 << 0; // device can handle partial TX checksums
const VIRTIO_NET_F_GUEST_CSUM: u64 = 1 << 1; // driver can handle partial RX checksums
const VIRTIO_NET_F_MAC: u64 = 1 << 5; // device exposes its MAC address in config space
const VIRTIO_NET_F_STATUS: u64 = 1 << 16; // link status is readable from config space

// Virtio-mmio register offsets (modern, version-2 register layout:
// separate DESC/AVAIL/USED addresses and a QUEUE_READY register).
const VIRTIO_MMIO_MAGIC: usize = 0x00; // reads "virt" (0x74726976) on a real device
const VIRTIO_MMIO_VERSION: usize = 0x04;
const VIRTIO_MMIO_DEVICE_ID: usize = 0x08; // 1 = network card
const VIRTIO_MMIO_DEVICE_FEATURES: usize = 0x10;
const VIRTIO_MMIO_DEVICE_FEATURES_SEL: usize = 0x14; // selects which 32-bit half is read
const VIRTIO_MMIO_DRIVER_FEATURES: usize = 0x20;
const VIRTIO_MMIO_DRIVER_FEATURES_SEL: usize = 0x24; // selects which 32-bit half is written
const VIRTIO_MMIO_QUEUE_SEL: usize = 0x30; // selects the queue the regs below address
const VIRTIO_MMIO_QUEUE_NUM_MAX: usize = 0x34;
const VIRTIO_MMIO_QUEUE_NUM: usize = 0x38;
const VIRTIO_MMIO_QUEUE_READY: usize = 0x44;
const VIRTIO_MMIO_QUEUE_NOTIFY: usize = 0x50;
const VIRTIO_MMIO_STATUS: usize = 0x70;
const VIRTIO_MMIO_QUEUE_DESC_LOW: usize = 0x80;
const VIRTIO_MMIO_QUEUE_DESC_HIGH: usize = 0x84;
const VIRTIO_MMIO_QUEUE_AVAIL_LOW: usize = 0x90;
const VIRTIO_MMIO_QUEUE_AVAIL_HIGH: usize = 0x94;
const VIRTIO_MMIO_QUEUE_USED_LOW: usize = 0xA0;
const VIRTIO_MMIO_QUEUE_USED_HIGH: usize = 0xA4;
const VIRTIO_MMIO_CONFIG_BASE: usize = 0x100; // device-specific config (MAC lives here)

// Device status register bits, written cumulatively during the handshake.
const VIRTIO_STATUS_ACKNOWLEDGE: u32 = 1;
const VIRTIO_STATUS_DRIVER: u32 = 2;
const VIRTIO_STATUS_DRIVER_OK: u32 = 4;
const VIRTIO_STATUS_FEATURES_OK: u32 = 8;

// Bytes of virtio-net header preceding each frame in a DMA buffer.
// NOTE(review): 10 is the legacy header size (without `num_buffers`);
// `VirtioNetHeader` below is 12 bytes — confirm which size the device expects.
const VIRTIO_NET_HDR_SIZE: usize = 10;

// Descriptor flag: the buffer is write-only for the device (RX buffers).
const VIRTQ_DESC_F_WRITE: u16 = 2;
63
/// Per-packet virtio-net header that precedes every frame in a buffer.
///
/// NOTE(review): this struct is 12 bytes (it includes `num_buffers`), but the
/// driver skips only `VIRTIO_NET_HDR_SIZE` (10) bytes when reading/writing
/// frames — confirm the header size negotiated with the device.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioNetHeader {
    flags: u8,        // checksum-offload flags; this driver always sends 0
    gso_type: u8,     // GSO mode; 0 = none
    hdr_len: u16,
    gso_size: u16,
    csum_start: u16,
    csum_offset: u16,
    num_buffers: u16, // used by the device only in some configurations
}
76
/// Split-virtqueue descriptor: one buffer segment shared with the device.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqDesc {
    addr: u64,  // bus address of the buffer handed to the device
    len: u32,   // buffer length in bytes
    flags: u16, // e.g. VIRTQ_DESC_F_WRITE for device-writable (RX) buffers
    next: u16,  // chain link; this driver reuses it as the free-list link
}
86
/// Split-virtqueue available ring (driver -> device).
///
/// NOTE(review): `ring` is fixed at 256 entries, but the ring memory and the
/// used-ring offset are computed from `6 + 2 * queue_size` bytes; the two
/// layouts only coincide when the negotiated queue size is exactly 256 —
/// confirm for devices reporting a smaller QUEUE_NUM_MAX.
#[repr(C)]
struct VirtqAvail {
    flags: u16,
    idx: u16,         // free-running count of entries the driver has published
    ring: [u16; 256], // head descriptor indices, indexed by idx % queue_size
    used_event: u16,  // only meaningful with EVENT_IDX (not negotiated here)
}
95
/// One completion entry in the used ring: which descriptor chain finished
/// and how many bytes the device wrote into it.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqUsedElem {
    id: u32,  // head descriptor index of the completed chain
    len: u32, // bytes written by the device
}
103
/// Split-virtqueue used ring (device -> driver).
///
/// NOTE(review): like `VirtqAvail`, the fixed 256-entry ring only matches the
/// allocated `6 + 8 * queue_size` bytes when the queue size is 256 — confirm.
#[repr(C)]
struct VirtqUsed {
    flags: u16,
    idx: u16,                     // free-running count of completions published by the device
    ring: [VirtqUsedElem; 256],   // completion entries, indexed by idx % queue_size
    avail_event: u16,             // only meaningful with EVENT_IDX (not negotiated here)
}
112
/// Driver-side state for one split virtqueue.
///
/// The three ring references point into a single leaked allocation created by
/// `allocate_virtqueue`, hence the `'static` lifetimes.
struct Virtqueue {
    // Number of descriptors in the queue (power of two, <= 256 here).
    size: u16,

    // Descriptor table; `next` doubles as the free-list link.
    descriptors: &'static mut [VirtqDesc],

    // Available ring: driver publishes descriptor heads here.
    avail: &'static mut VirtqAvail,

    // Used ring: device publishes completions here.
    used: &'static mut VirtqUsed,

    // Head of the intrusive free list threaded through `descriptors[..].next`.
    free_head: u16,

    // Last used-ring index we have consumed (compared against `used.idx`).
    last_used_idx: u16,

    // Count of descriptors currently on the free list.
    num_free: u16,
}
136
137impl Virtqueue {
138 fn new(
140 descriptors: &'static mut [VirtqDesc],
141 avail: &'static mut VirtqAvail,
142 used: &'static mut VirtqUsed,
143 size: u16,
144 ) -> Self {
145 for i in 0..size {
147 descriptors[i as usize].next = if i + 1 < size { i + 1 } else { 0 };
148 }
149
150 avail.flags = 0;
152 avail.idx = 0;
153 used.flags = 0;
154 used.idx = 0;
155
156 Self {
157 size,
158 descriptors,
159 avail,
160 used,
161 free_head: 0,
162 last_used_idx: 0,
163 num_free: size,
164 }
165 }
166
167 fn alloc_desc(&mut self) -> Option<u16> {
169 if self.num_free == 0 {
170 return None;
171 }
172
173 let desc_idx = self.free_head;
174 self.free_head = self.descriptors[desc_idx as usize].next;
175 self.num_free -= 1;
176
177 Some(desc_idx)
178 }
179
180 fn free_desc(&mut self, desc_idx: u16) {
182 self.descriptors[desc_idx as usize].next = self.free_head;
183 self.free_head = desc_idx;
184 self.num_free += 1;
185 }
186
187 fn add_to_avail(&mut self, desc_idx: u16) {
189 let avail_idx = self.avail.idx as usize % self.size as usize;
190 self.avail.ring[avail_idx] = desc_idx;
191
192 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
194
195 self.avail.idx = self.avail.idx.wrapping_add(1);
196 }
197
198 fn get_used(&mut self) -> Option<(u16, u32)> {
200 if self.last_used_idx == self.used.idx {
201 return None;
202 }
203
204 let used_idx = self.last_used_idx as usize % self.size as usize;
205 let used_elem = self.used.ring[used_idx];
206
207 self.last_used_idx = self.last_used_idx.wrapping_add(1);
208
209 Some((used_elem.id as u16, used_elem.len))
210 }
211}
212
/// Bookkeeping for the leaked ring allocation backing one virtqueue, kept so
/// the region could later be reclaimed or unmapped.
struct VirtqueueDmaRegion {
    virt_addr: usize, // start of the ring memory; also handed to the device as its address
    num_pages: usize, // allocation size in 4 KiB pages
}
223
/// One 4 KiB packet buffer owned by a single descriptor slot.
struct DataBuffer {
    virt_addr: usize, // CPU-visible address of the buffer
    phys_addr: u64,   // address given to the device — here equal to virt_addr (identity mapping assumed)
}
229
/// Polled virtio-net driver over the virtio-mmio transport.
/// Queue 0 is RX, queue 1 is TX.
pub struct VirtioNetDriver {
    mmio_base: usize,        // base of the device's MMIO register window
    mac_address: MacAddress, // read from config space when VIRTIO_NET_F_MAC is offered
    features: u64,           // feature bits offered by the device
    rx_queue_size: u16,      // negotiated RX queue depth (<= 256)
    tx_queue_size: u16,      // negotiated TX queue depth (<= 256)
    state: DeviceState,
    stats: DeviceStatistics,

    // Virtqueue state; None until initialization succeeds.
    rx_queue: Option<Virtqueue>,
    tx_queue: Option<Virtqueue>,

    // Leaked ring allocations (kept for potential later reclamation).
    rx_dma: Option<VirtqueueDmaRegion>,
    tx_dma: Option<VirtqueueDmaRegion>,
    // One 4 KiB data buffer per descriptor, indexed by descriptor id.
    rx_buffers: Vec<DataBuffer>,
    tx_buffers: Vec<DataBuffer>,
}
253
254impl VirtioNetDriver {
255 pub fn new(mmio_base: usize) -> Result<Self, KernelError> {
257 let mut driver = Self {
258 mmio_base,
259 mac_address: MacAddress::ZERO,
260 features: 0,
261 rx_queue_size: 256,
262 tx_queue_size: 256,
263 state: DeviceState::Down,
264 stats: DeviceStatistics::default(),
265 rx_queue: None,
266 tx_queue: None,
267 rx_dma: None,
268 tx_dma: None,
269 rx_buffers: Vec::new(),
270 tx_buffers: Vec::new(),
271 };
272
273 driver.initialize()?;
274 Ok(driver)
275 }
276
277 fn read_reg(&self, offset: usize) -> u32 {
279 unsafe { core::ptr::read_volatile((self.mmio_base + offset) as *const u32) }
283 }
284
285 fn write_reg(&self, offset: usize, value: u32) {
287 unsafe {
289 core::ptr::write_volatile((self.mmio_base + offset) as *mut u32, value);
290 }
291 }
292
293 fn initialize(&mut self) -> Result<(), KernelError> {
306 self.write_reg(VIRTIO_MMIO_STATUS, 0);
308
309 self.write_reg(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE);
311
312 self.write_reg(
314 VIRTIO_MMIO_STATUS,
315 VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER,
316 );
317
318 self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
320 let features_low = self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64;
321 self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
322 let features_high = (self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64) << 32;
323 self.features = features_low | features_high;
324
325 let driver_features = VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS;
326 self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
327 self.write_reg(
328 VIRTIO_MMIO_DRIVER_FEATURES,
329 (driver_features & 0xFFFFFFFF) as u32,
330 );
331 self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
332 self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES, (driver_features >> 32) as u32);
333
334 self.write_reg(
336 VIRTIO_MMIO_STATUS,
337 VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK,
338 );
339
340 if (self.read_reg(VIRTIO_MMIO_STATUS) & VIRTIO_STATUS_FEATURES_OK) == 0 {
341 return Err(KernelError::HardwareError {
342 device: "virtio-net",
343 code: 1,
344 });
345 }
346
347 self.setup_rx_queue()?;
350 self.setup_tx_queue()?;
351
352 if (self.features & VIRTIO_NET_F_MAC) != 0 {
354 let mut mac = [0u8; 6];
355 for (i, byte) in mac.iter_mut().enumerate() {
356 *byte = self.read_reg(VIRTIO_MMIO_CONFIG_BASE + i) as u8;
357 }
358 self.mac_address = MacAddress(mac);
359 }
360
361 self.write_reg(
363 VIRTIO_MMIO_STATUS,
364 VIRTIO_STATUS_ACKNOWLEDGE
365 | VIRTIO_STATUS_DRIVER
366 | VIRTIO_STATUS_FEATURES_OK
367 | VIRTIO_STATUS_DRIVER_OK,
368 );
369
370 println!(
371 "[VIRTIO-NET] Initialized with MAC: {:02X}:{:02X}:{:02X}:{:02X}:{:02X}:{:02X}",
372 self.mac_address.0[0],
373 self.mac_address.0[1],
374 self.mac_address.0[2],
375 self.mac_address.0[3],
376 self.mac_address.0[4],
377 self.mac_address.0[5]
378 );
379 println!(
380 "[VIRTIO-NET] RX queue: {} descs, TX queue: {} descs",
381 self.rx_queue_size, self.tx_queue_size
382 );
383
384 self.state = DeviceState::Up;
385 Ok(())
386 }
387
388 fn setup_rx_queue(&mut self) -> Result<(), KernelError> {
394 self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 0); let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
397 if max_size == 0 {
398 return Err(KernelError::HardwareError {
399 device: "virtio-net",
400 code: 2,
401 });
402 }
403 let queue_size = max_size.min(256);
404 self.rx_queue_size = queue_size;
405
406 let (vq, dma, buffers) = self.allocate_virtqueue(queue_size, true)?;
408
409 let desc_phys = dma.virt_addr as u64; let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
412 let used_offset = avail_offset + 6 + 2 * (queue_size as usize);
413 let avail_phys = desc_phys + avail_offset as u64;
414 let used_phys = desc_phys + used_offset as u64;
415
416 self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
417 self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
418 self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
419 self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
420 self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
421 self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
422 self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
423 self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);
424
425 self.rx_queue = Some(vq);
426 self.rx_dma = Some(dma);
427 self.rx_buffers = buffers;
428
429 Ok(())
430 }
431
432 fn setup_tx_queue(&mut self) -> Result<(), KernelError> {
434 self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 1); let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
437 if max_size == 0 {
438 return Err(KernelError::HardwareError {
439 device: "virtio-net",
440 code: 3,
441 });
442 }
443 let queue_size = max_size.min(256);
444 self.tx_queue_size = queue_size;
445
446 let (vq, dma, buffers) = self.allocate_virtqueue(queue_size, false)?;
447
448 let desc_phys = dma.virt_addr as u64;
449 let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
450 let used_offset = avail_offset + 6 + 2 * (queue_size as usize);
451 let avail_phys = desc_phys + avail_offset as u64;
452 let used_phys = desc_phys + used_offset as u64;
453
454 self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
455 self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
456 self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
457 self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
458 self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
459 self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
460 self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
461 self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);
462
463 self.tx_queue = Some(vq);
464 self.tx_dma = Some(dma);
465 self.tx_buffers = buffers;
466
467 Ok(())
468 }
469
470 fn allocate_virtqueue(
476 &self,
477 queue_size: u16,
478 is_rx: bool,
479 ) -> Result<(Virtqueue, VirtqueueDmaRegion, Vec<DataBuffer>), KernelError> {
480 let qs = queue_size as usize;
481
482 let desc_size = qs * core::mem::size_of::<VirtqDesc>();
487 let avail_size = 6 + 2 * qs;
488 let used_size = 6 + 8 * qs;
489 let total_ring_bytes = desc_size + avail_size + used_size;
490 let ring_pages = total_ring_bytes.div_ceil(4096);
491
492 let ring_mem = alloc::vec![0u8; ring_pages * 4096];
497 let ring_ptr = ring_mem.as_ptr() as usize;
498 core::mem::forget(ring_mem);
500
501 let desc_ptr = ring_ptr as *mut VirtqDesc;
503 let avail_ptr = (ring_ptr + desc_size) as *mut VirtqAvail;
504 let used_ptr = (ring_ptr + desc_size + avail_size) as *mut VirtqUsed;
505
506 let descriptors = unsafe { core::slice::from_raw_parts_mut(desc_ptr, qs) };
510 let avail = unsafe { &mut *avail_ptr };
511 let used = unsafe { &mut *used_ptr };
512
513 let vq = Virtqueue::new(descriptors, avail, used, queue_size);
514
515 let mut data_buffers = Vec::with_capacity(qs);
517 for _i in 0..qs {
518 let buf = alloc::vec![0u8; 4096];
519 let buf_virt = buf.as_ptr() as usize;
520 let buf_phys = buf_virt as u64; core::mem::forget(buf);
522 data_buffers.push(DataBuffer {
523 virt_addr: buf_virt,
524 phys_addr: buf_phys,
525 });
526 }
527
528 if is_rx {
530 let desc_slice = unsafe { core::slice::from_raw_parts_mut(desc_ptr, qs) };
534 let avail_ref = unsafe { &mut *avail_ptr };
535 for i in 0..qs {
536 desc_slice[i].addr = data_buffers[i].phys_addr;
537 desc_slice[i].len = 4096;
538 desc_slice[i].flags = VIRTQ_DESC_F_WRITE; desc_slice[i].next = 0;
540 avail_ref.ring[i] = i as u16;
541 }
542 avail_ref.idx = queue_size;
543 }
544
545 let dma = VirtqueueDmaRegion {
546 virt_addr: ring_ptr,
547 num_pages: ring_pages,
548 };
549
550 Ok((vq, dma, data_buffers))
551 }
552
553 pub fn transmit(&mut self, packet: &[u8]) -> Result<(), KernelError> {
558 if self.state != DeviceState::Up {
559 return Err(KernelError::InvalidState {
560 expected: "up",
561 actual: "down",
562 });
563 }
564
565 let total_len = VIRTIO_NET_HDR_SIZE + packet.len();
566 if total_len > 4096 {
567 return Err(KernelError::InvalidArgument {
568 name: "packet_size",
569 value: "too_large",
570 });
571 }
572
573 let mmio = self.mmio_base;
574 if let Some(ref mut tx_queue) = self.tx_queue {
575 let desc_idx = tx_queue
576 .alloc_desc()
577 .ok_or(KernelError::ResourceExhausted {
578 resource: "virtio_tx_descriptors",
579 })?;
580
581 if (desc_idx as usize) < self.tx_buffers.len() {
583 let buf_virt = self.tx_buffers[desc_idx as usize].virt_addr;
584 let buf_phys = self.tx_buffers[desc_idx as usize].phys_addr;
585 let buf_slice =
589 unsafe { core::slice::from_raw_parts_mut(buf_virt as *mut u8, 4096) };
590
591 buf_slice[..VIRTIO_NET_HDR_SIZE].fill(0);
593 buf_slice[VIRTIO_NET_HDR_SIZE..total_len].copy_from_slice(packet);
595
596 let desc = &mut tx_queue.descriptors[desc_idx as usize];
598 desc.addr = buf_phys;
599 desc.len = total_len as u32;
600 desc.flags = 0; }
602
603 tx_queue.add_to_avail(desc_idx);
604
605 self.stats.tx_packets += 1;
606 self.stats.tx_bytes += packet.len() as u64;
607
608 tx_queue.free_desc(desc_idx);
611 } else {
612 return Err(KernelError::HardwareError {
613 device: "virtio-net",
614 code: 0x01,
615 });
616 }
617
618 unsafe {
621 core::ptr::write_volatile((mmio + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32, 1);
622 }
623
624 Ok(())
625 }
626
627 pub fn receive(&mut self) -> Result<Option<Packet>, KernelError> {
633 if self.state != DeviceState::Up {
634 return Ok(None);
635 }
636
637 if let Some(ref mut rx_queue) = self.rx_queue {
638 if let Some((desc_idx, len)) = rx_queue.get_used() {
639 let total_len = len as usize;
640
641 let data_offset = VIRTIO_NET_HDR_SIZE;
643
644 let pkt = if (desc_idx as usize) < self.rx_buffers.len() && total_len > data_offset
645 {
646 let buf_virt = self.rx_buffers[desc_idx as usize].virt_addr;
647 let frame_len = total_len - data_offset;
648
649 let buf_slice =
652 unsafe { core::slice::from_raw_parts(buf_virt as *const u8, 4096) };
653 let frame_data = &buf_slice[data_offset..data_offset + frame_len];
654
655 crate::net::Packet::from_bytes(frame_data)
656 } else {
657 crate::net::Packet::new(0)
658 };
659
660 self.stats.rx_packets += 1;
661 self.stats.rx_bytes += total_len as u64;
662
663 let desc = &mut rx_queue.descriptors[desc_idx as usize];
665 desc.len = 4096;
666 desc.flags = VIRTQ_DESC_F_WRITE;
667 rx_queue.add_to_avail(desc_idx);
668
669 Ok(Some(pkt))
670 } else {
671 Ok(None)
672 }
673 } else {
674 Ok(None)
675 }
676 }
677
678 fn notify_queue(&self, queue_idx: u16) {
680 self.write_reg(0x50, queue_idx as u32);
683 }
684
685 pub fn mac_address(&self) -> MacAddress {
687 self.mac_address
688 }
689}
690
691impl NetworkDevice for VirtioNetDriver {
694 fn name(&self) -> &str {
695 "eth1"
696 }
697
698 fn mac_address(&self) -> MacAddress {
699 self.mac_address
700 }
701
702 fn capabilities(&self) -> DeviceCapabilities {
703 DeviceCapabilities {
704 max_transmission_unit: 1500,
705 supports_vlan: false,
706 supports_checksum_offload: (self.features & VIRTIO_NET_F_CSUM) != 0,
707 supports_tso: false,
708 supports_lro: false,
709 }
710 }
711
712 fn state(&self) -> DeviceState {
713 self.state
714 }
715
716 fn set_state(&mut self, state: DeviceState) -> Result<(), KernelError> {
717 match state {
718 DeviceState::Up => {
719 if self.state == DeviceState::Down {
720 self.write_reg(0x70, 1 | 2 | 4 | 8);
722 }
723 self.state = DeviceState::Up;
724 }
725 DeviceState::Down => {
726 self.write_reg(0x70, 0);
728 self.state = DeviceState::Down;
729 }
730 _ => {
731 self.state = state;
732 }
733 }
734 Ok(())
735 }
736
737 fn statistics(&self) -> DeviceStatistics {
738 self.stats
739 }
740
741 fn transmit(&mut self, packet: &Packet) -> Result<(), KernelError> {
742 if self.state != DeviceState::Up {
743 self.stats.tx_dropped += 1;
744 return Err(KernelError::InvalidState {
745 expected: "up",
746 actual: "not_up",
747 });
748 }
749
750 self.transmit(packet.data())
752 }
753
754 fn receive(&mut self) -> Result<Option<Packet>, KernelError> {
755 VirtioNetDriver::receive(self)
757 }
758}
759
760pub fn init() -> Result<(), KernelError> {
762 println!("[VIRTIO-NET] VirtIO Network driver module loaded");
763 Ok(())
764}
765
#[cfg(test)]
mod tests {
    use super::*;

    /// The feature-bit constants must encode the spec-defined bit positions.
    #[test]
    fn test_virtio_constants() {
        let expectations = [(VIRTIO_NET_F_MAC, 1u64 << 5), (VIRTIO_NET_F_STATUS, 1u64 << 16)];
        for (actual, expected) in expectations {
            assert_eq!(actual, expected);
        }
    }
}