1#![allow(dead_code, clippy::needless_range_loop)]
30
31use alloc::vec::Vec;
32
33use crate::error::KernelError;
34
// 2D command types submitted on the control queue (virtio-gpu spec, ctrl_type).
const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x100;
const VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: u32 = 0x101;
const VIRTIO_GPU_CMD_RESOURCE_UNREF: u32 = 0x102;
const VIRTIO_GPU_CMD_SET_SCANOUT: u32 = 0x103;
const VIRTIO_GPU_CMD_RESOURCE_FLUSH: u32 = 0x104;
const VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: u32 = 0x105;
const VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: u32 = 0x106;
const VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: u32 = 0x107;
const VIRTIO_GPU_CMD_GET_CAPSET_INFO: u32 = 0x108;
const VIRTIO_GPU_CMD_GET_CAPSET: u32 = 0x109;
const VIRTIO_GPU_CMD_GET_EDID: u32 = 0x10A;

// Success response types returned in the response header.
const VIRTIO_GPU_RESP_OK_NODATA: u32 = 0x1100;
const VIRTIO_GPU_RESP_OK_DISPLAY_INFO: u32 = 0x1101;
const VIRTIO_GPU_RESP_OK_CAPSET_INFO: u32 = 0x1102;
const VIRTIO_GPU_RESP_OK_CAPSET: u32 = 0x1103;
const VIRTIO_GPU_RESP_OK_EDID: u32 = 0x1104;

// Error response types.
const VIRTIO_GPU_RESP_ERR_UNSPEC: u32 = 0x1200;
const VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY: u32 = 0x1201;
const VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID: u32 = 0x1202;
const VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID: u32 = 0x1203;
const VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID: u32 = 0x1204;
const VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER: u32 = 0x1205;

// 2D resource pixel formats (VIRTIO_GPU_FORMAT_*).
// NOTE(review): the spec assigns B8G8R8X8_UNORM = 2 and X8B8G8R8_UNORM = 68;
// the names on the 68/134 entries below may not match their values — confirm
// against the virtio-gpu spec before relying on channel order.
const FORMAT_B8G8R8A8_UNORM: u32 = 1;
const FORMAT_R8G8B8A8_UNORM: u32 = 67;
const FORMAT_B8G8R8X8_UNORM: u32 = 68;
const FORMAT_R8G8B8X8_UNORM: u32 = 134;

// Device feature bits offered in the feature negotiation.
const VIRTIO_GPU_F_VIRGL: u64 = 1 << 0; // 3D (virgl) command support
const VIRTIO_GPU_F_EDID: u64 = 1 << 1; // per-scanout EDID query support

// Number of scanout slots in the display-info response.
const VIRTIO_GPU_MAX_SCANOUTS: usize = 16;
110
// virtio-mmio register offsets from the device's MMIO base (modern layout).
const VIRTIO_MMIO_MAGIC: usize = 0x00;
const VIRTIO_MMIO_VERSION: usize = 0x04;
const VIRTIO_MMIO_DEVICE_ID: usize = 0x08;
const VIRTIO_MMIO_DEVICE_FEATURES: usize = 0x10;
const VIRTIO_MMIO_DEVICE_FEATURES_SEL: usize = 0x14;
const VIRTIO_MMIO_DRIVER_FEATURES: usize = 0x20;
const VIRTIO_MMIO_DRIVER_FEATURES_SEL: usize = 0x24;
const VIRTIO_MMIO_QUEUE_SEL: usize = 0x30;
const VIRTIO_MMIO_QUEUE_NUM_MAX: usize = 0x34;
const VIRTIO_MMIO_QUEUE_NUM: usize = 0x38;
const VIRTIO_MMIO_QUEUE_READY: usize = 0x44;
const VIRTIO_MMIO_QUEUE_NOTIFY: usize = 0x50;
const VIRTIO_MMIO_STATUS: usize = 0x70;
const VIRTIO_MMIO_QUEUE_DESC_LOW: usize = 0x80;
const VIRTIO_MMIO_QUEUE_DESC_HIGH: usize = 0x84;
const VIRTIO_MMIO_QUEUE_AVAIL_LOW: usize = 0x90;
const VIRTIO_MMIO_QUEUE_AVAIL_HIGH: usize = 0x94;
const VIRTIO_MMIO_QUEUE_USED_LOW: usize = 0xA0;
const VIRTIO_MMIO_QUEUE_USED_HIGH: usize = 0xA4;
const VIRTIO_MMIO_CONFIG_BASE: usize = 0x100;

// Device status bits written to VIRTIO_MMIO_STATUS during the init handshake.
const VIRTIO_STATUS_ACKNOWLEDGE: u32 = 1;
const VIRTIO_STATUS_DRIVER: u32 = 2;
const VIRTIO_STATUS_DRIVER_OK: u32 = 4;
const VIRTIO_STATUS_FEATURES_OK: u32 = 8;

// Virtqueue descriptor flags.
const VIRTQ_DESC_F_NEXT: u16 = 1; // chain continues via `next`
const VIRTQ_DESC_F_WRITE: u16 = 2; // buffer is device-writable
146
/// Common header that prefixes every virtio-gpu control command and response.
/// Layout must match the device ABI exactly — do not reorder fields.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuCtrlHdr {
    hdr_type: u32,     // VIRTIO_GPU_CMD_* on requests, VIRTIO_GPU_RESP_* on responses
    flags: u32,
    fence_id: u64,
    ctx_id: u32,
    ring_idx: u8,
    padding: [u8; 3],
}

impl VirtioGpuCtrlHdr {
    /// Build a header for `hdr_type` with every other field zeroed
    /// (no fence requested, no 3D context).
    fn new(hdr_type: u32) -> Self {
        Self {
            hdr_type,
            flags: 0,
            fence_id: 0,
            ctx_id: 0,
            ring_idx: 0,
            padding: [0; 3],
        }
    }
}
182
/// Rectangle in framebuffer pixel coordinates, as used by scanout, transfer
/// and flush commands. Matches the on-wire `virtio_gpu_rect` layout.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VirtioGpuRect {
    pub x: u32,
    pub y: u32,
    pub width: u32,
    pub height: u32,
}

impl VirtioGpuRect {
    /// Construct a rectangle at (`x`, `y`) with the given size.
    pub fn new(x: u32, y: u32, width: u32, height: u32) -> Self {
        Self {
            x,
            y,
            width,
            height,
        }
    }
}
208
/// One scanout entry in the display-info response: its geometry and whether
/// the host currently has that display enabled.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VirtioGpuDisplayOne {
    rect: VirtioGpuRect,
    enabled: u32, // nonzero when the host exposes this scanout
    flags: u32,
}
220
/// Full response to VIRTIO_GPU_CMD_GET_DISPLAY_INFO: header plus one entry
/// per possible scanout.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuRespDisplayInfo {
    hdr: VirtioGpuCtrlHdr,
    pmodes: [VirtioGpuDisplayOne; VIRTIO_GPU_MAX_SCANOUTS],
}
230
/// VIRTIO_GPU_CMD_RESOURCE_CREATE_2D request body: create a host resource
/// with the given id, pixel format and dimensions.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceCreate2d {
    hdr: VirtioGpuCtrlHdr,
    resource_id: u32,
    format: u32, // one of the FORMAT_* constants
    width: u32,
    height: u32,
}
246
/// VIRTIO_GPU_CMD_RESOURCE_UNREF request body: destroy a host resource.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceUnref {
    hdr: VirtioGpuCtrlHdr,
    resource_id: u32,
    padding: u32,
}
258
/// VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING request body; followed on the wire
/// by `nr_entries` `VirtioGpuMemEntry` records describing guest memory.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceAttachBacking {
    hdr: VirtioGpuCtrlHdr,
    resource_id: u32,
    nr_entries: u32,
}
273
/// One scatter-gather entry for attach-backing: a guest-physical address and
/// length of a contiguous memory region.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuMemEntry {
    addr: u64,
    length: u32,
    padding: u32,
}
285
/// VIRTIO_GPU_CMD_SET_SCANOUT request body: bind `resource_id` (or 0 to
/// disable) to a scanout, showing the given rectangle of the resource.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuSetScanout {
    hdr: VirtioGpuCtrlHdr,
    rect: VirtioGpuRect,
    scanout_id: u32,
    resource_id: u32,
}
299
/// VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D request body: copy a rectangle of the
/// guest backing store (starting at byte `offset`) into the host resource.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuTransferToHost2d {
    hdr: VirtioGpuCtrlHdr,
    rect: VirtioGpuRect,
    offset: u64,
    resource_id: u32,
    padding: u32,
}
315
/// VIRTIO_GPU_CMD_RESOURCE_FLUSH request body: present a rectangle of the
/// resource to any scanouts it is bound to.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceFlush {
    hdr: VirtioGpuCtrlHdr,
    rect: VirtioGpuRect,
    resource_id: u32,
    padding: u32,
}
329
/// VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING request body: detach all guest
/// memory from a resource.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceDetachBacking {
    hdr: VirtioGpuCtrlHdr,
    resource_id: u32,
    padding: u32,
}
341
/// VIRTIO_GPU_CMD_GET_EDID request body (valid only when VIRTIO_GPU_F_EDID
/// was negotiated).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuGetEdid {
    hdr: VirtioGpuCtrlHdr,
    scanout: u32,
    padding: u32,
}
353
/// Response to GET_EDID: `size` valid bytes of EDID data in `edid`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuRespEdid {
    hdr: VirtioGpuCtrlHdr,
    size: u32,
    padding: u32,
    edid: [u8; 1024],
}
367
/// Virtqueue descriptor table entry (virtio split-ring `virtq_desc`):
/// one guest buffer, optionally chained to the next via VIRTQ_DESC_F_NEXT.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqDesc {
    addr: u64,  // guest-physical buffer address
    len: u32,   // buffer length in bytes
    flags: u16, // VIRTQ_DESC_F_*
    next: u16,  // index of the next descriptor when F_NEXT is set
}
385
/// Driver-owned available ring (`virtq_avail`). The ring array is fixed at
/// 256 entries, which is safe because queue sizes are capped at 256 during
/// setup; only the first `size` slots are ever used.
#[repr(C)]
struct VirtqAvail {
    flags: u16,
    idx: u16, // free-running index of the next slot the driver will fill
    ring: [u16; 256],
    used_event: u16,
}
394
/// One completion record in the used ring: the head descriptor index of the
/// finished chain and how many bytes the device wrote.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqUsedElem {
    id: u32,
    len: u32,
}
402
/// Device-owned used ring (`virtq_used`). Fixed 256-entry array, matching
/// the 256-entry cap applied at queue setup.
#[repr(C)]
struct VirtqUsed {
    flags: u16,
    idx: u16, // free-running index advanced by the device on completion
    ring: [VirtqUsedElem; 256],
    avail_event: u16,
}
411
/// Driver-side state for one split virtqueue: the three shared rings plus a
/// free list threaded through the descriptors' `next` fields.
struct Virtqueue {
    // Number of entries negotiated with the device (<= 256).
    size: u16,

    // Descriptor table shared with the device.
    descriptors: &'static mut [VirtqDesc],

    // Available ring (driver -> device).
    avail: &'static mut VirtqAvail,

    // Used ring (device -> driver).
    used: &'static mut VirtqUsed,

    // Head of the free-descriptor list (valid only when num_free > 0).
    free_head: u16,

    // Last used-ring index we have consumed (free-running, wraps).
    last_used_idx: u16,

    // Count of descriptors currently on the free list.
    num_free: u16,
}
436
437impl Virtqueue {
438 fn new(
440 descriptors: &'static mut [VirtqDesc],
441 avail: &'static mut VirtqAvail,
442 used: &'static mut VirtqUsed,
443 size: u16,
444 ) -> Self {
445 for i in 0..size {
447 descriptors[i as usize].next = if i + 1 < size { i + 1 } else { 0 };
448 }
449
450 avail.flags = 0;
452 avail.idx = 0;
453 used.flags = 0;
454 used.idx = 0;
455
456 Self {
457 size,
458 descriptors,
459 avail,
460 used,
461 free_head: 0,
462 last_used_idx: 0,
463 num_free: size,
464 }
465 }
466
467 fn alloc_desc(&mut self) -> Option<u16> {
469 if self.num_free == 0 {
470 return None;
471 }
472
473 let desc_idx = self.free_head;
474 self.free_head = self.descriptors[desc_idx as usize].next;
475 self.num_free -= 1;
476
477 Some(desc_idx)
478 }
479
480 fn free_desc(&mut self, desc_idx: u16) {
482 self.descriptors[desc_idx as usize].next = self.free_head;
483 self.free_head = desc_idx;
484 self.num_free += 1;
485 }
486
487 fn add_to_avail(&mut self, desc_idx: u16) {
489 let avail_idx = self.avail.idx as usize % self.size as usize;
490 self.avail.ring[avail_idx] = desc_idx;
491
492 core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
494
495 self.avail.idx = self.avail.idx.wrapping_add(1);
496 }
497
498 fn get_used(&mut self) -> Option<(u16, u32)> {
500 if self.last_used_idx == self.used.idx {
501 return None;
502 }
503
504 let used_idx = self.last_used_idx as usize % self.size as usize;
505 let used_elem = self.used.ring[used_idx];
506
507 self.last_used_idx = self.last_used_idx.wrapping_add(1);
508
509 Some((used_elem.id as u16, used_elem.len))
510 }
511}
512
/// Bookkeeping for the leaked ring allocation backing one virtqueue, kept so
/// the region could later be identified or reclaimed.
struct VirtqueueDmaRegion {
    virt_addr: usize, // start of the ring memory
    num_pages: usize, // size in 4 KiB pages
}
523
/// One 4 KiB bounce buffer associated with a descriptor slot.
struct DataBuffer {
    virt_addr: usize, // driver-side address used for copying
    phys_addr: u64,   // address handed to the device in descriptors
}
529
/// Coarse lifecycle state of the GPU device as seen by this driver.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum GpuDeviceState {
    Uninitialized, // before/while `initialize` runs
    Ready,         // handshake complete, queues live
    Error,         // unrecoverable failure
}
544
/// Driver for a virtio-gpu device over virtio-mmio, managing the control and
/// cursor virtqueues and a simple single-scanout framebuffer.
pub struct VirtioGpuDriver {
    // Base address of the device's MMIO register window.
    mmio_base: usize,

    // Raw 64-bit feature word offered by the device.
    features: u64,

    state: GpuDeviceState,

    // Queue 0 (commands) and queue 1 (cursor, optional).
    controlq: Option<Virtqueue>,
    cursorq: Option<Virtqueue>,

    // Ring allocations backing each queue.
    ctrl_dma: Option<VirtqueueDmaRegion>,
    cursor_dma: Option<VirtqueueDmaRegion>,

    // Per-descriptor bounce buffers for each queue.
    ctrl_buffers: Vec<DataBuffer>,
    cursor_buffers: Vec<DataBuffer>,

    // First enabled scanout reported by the device, if queried successfully.
    display_info: Option<VirtioGpuDisplayOne>,

    // Monotonic id allocator; 0 means "no framebuffer resource yet".
    next_resource_id: u32,
    framebuffer_resource_id: u32,
    // Guest-side pixel storage attached as the framebuffer backing.
    framebuffer_backing: Option<Vec<u32>>,

    // Display dimensions in pixels.
    width: u32,
    height: u32,
}
589
590impl VirtioGpuDriver {
591 pub fn new(mmio_base: usize) -> Result<Self, KernelError> {
593 let mut driver = Self {
594 mmio_base,
595 features: 0,
596 state: GpuDeviceState::Uninitialized,
597 controlq: None,
598 cursorq: None,
599 ctrl_dma: None,
600 cursor_dma: None,
601 ctrl_buffers: Vec::new(),
602 cursor_buffers: Vec::new(),
603 display_info: None,
604 next_resource_id: 1,
605 framebuffer_resource_id: 0,
606 framebuffer_backing: None,
607 width: 0,
608 height: 0,
609 };
610
611 driver.initialize()?;
612 Ok(driver)
613 }
614
615 fn read_reg(&self, offset: usize) -> u32 {
619 unsafe { core::ptr::read_volatile((self.mmio_base + offset) as *const u32) }
624 }
625
626 fn write_reg(&self, offset: usize, value: u32) {
628 unsafe {
630 core::ptr::write_volatile((self.mmio_base + offset) as *mut u32, value);
631 }
632 }
633
    /// Run the virtio initialization handshake: verify device identity,
    /// negotiate features, set up both virtqueues, flip DRIVER_OK, then query
    /// the display and build a framebuffer.
    ///
    /// The status-bit sequence (reset -> ACKNOWLEDGE -> DRIVER ->
    /// FEATURES_OK -> DRIVER_OK) is mandated by the virtio spec and is
    /// order-sensitive.
    ///
    /// # Errors
    /// `HardwareError` with codes 0x01 (bad magic), 0x02 (wrong device id) or
    /// 0x03 (features rejected), plus anything the queue-setup helpers return.
    fn initialize(&mut self) -> Result<(), KernelError> {
        // "virt" in little-endian identifies a virtio-mmio device.
        let magic = self.read_reg(VIRTIO_MMIO_MAGIC);
        if magic != 0x74726976 {
            crate::println!(
                "[VIRTIO-GPU] Invalid magic: {:#010x} (expected 0x74726976)",
                magic
            );
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x01,
            });
        }

        // Device id 16 is the GPU device class.
        let device_id = self.read_reg(VIRTIO_MMIO_DEVICE_ID);
        if device_id != 16 {
            crate::println!(
                "[VIRTIO-GPU] Unexpected device ID: {} (expected 16)",
                device_id
            );
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x02,
            });
        }

        let version = self.read_reg(VIRTIO_MMIO_VERSION);
        crate::println!(
            "[VIRTIO-GPU] Found virtio-gpu device (MMIO version {})",
            version
        );

        // Writing 0 resets the device before the handshake.
        self.write_reg(VIRTIO_MMIO_STATUS, 0);

        self.write_reg(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE);

        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER,
        );

        // Read the 64-bit feature word in two 32-bit halves via the selector.
        self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
        let features_low = self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64;
        self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
        let features_high = (self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64) << 32;
        self.features = features_low | features_high;

        crate::println!("[VIRTIO-GPU] Device features: {:#018x}", self.features);

        if self.features & VIRTIO_GPU_F_VIRGL != 0 {
            crate::println!("[VIRTIO-GPU] - VIRGL (3D) supported");
        }
        if self.features & VIRTIO_GPU_F_EDID != 0 {
            crate::println!("[VIRTIO-GPU] - EDID supported");
        }

        // Accept only EDID; VIRGL (3D) is deliberately left unnegotiated.
        let driver_features = self.features & VIRTIO_GPU_F_EDID;
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
        self.write_reg(
            VIRTIO_MMIO_DRIVER_FEATURES,
            (driver_features & 0xFFFFFFFF) as u32,
        );
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES, (driver_features >> 32) as u32);

        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK,
        );

        // The device must leave FEATURES_OK set to confirm the selection.
        if (self.read_reg(VIRTIO_MMIO_STATUS) & VIRTIO_STATUS_FEATURES_OK) == 0 {
            crate::println!("[VIRTIO-GPU] Device did not accept features");
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x03,
            });
        }

        self.setup_control_queue()?;
        self.setup_cursor_queue()?;

        // DRIVER_OK: the device may now process the queues.
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE
                | VIRTIO_STATUS_DRIVER
                | VIRTIO_STATUS_FEATURES_OK
                | VIRTIO_STATUS_DRIVER_OK,
        );

        crate::println!("[VIRTIO-GPU] Device status: DRIVER_OK");

        match self.get_display_info() {
            Ok(display) => {
                self.width = display.rect.width;
                self.height = display.rect.height;
                self.display_info = Some(display);

                crate::println!(
                    "[VIRTIO-GPU] Display: {}x{} (enabled={})",
                    self.width,
                    self.height,
                    display.enabled
                );

                // A framebuffer failure is non-fatal: the device stays usable
                // for explicit resource commands.
                if let Err(e) = self.setup_framebuffer() {
                    crate::println!("[VIRTIO-GPU] Framebuffer setup failed: {:?}", e);
                }
            }
            Err(e) => {
                crate::println!("[VIRTIO-GPU] Display info query failed: {:?}", e);
                // Fall back to a nominal 1024x768 mode (no framebuffer set up).
                self.width = 1024;
                self.height = 768;
            }
        }

        self.state = GpuDeviceState::Ready;
        Ok(())
    }
778
    /// Configure virtqueue 0 (the control queue) and register its rings with
    /// the device.
    ///
    /// # Errors
    /// `HardwareError` (code 0x10) when the device reports a zero maximum
    /// queue size, i.e. the queue does not exist.
    fn setup_control_queue(&mut self) -> Result<(), KernelError> {
        // Select queue 0 before touching any queue registers.
        self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 0);

        let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
        if max_size == 0 {
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x10,
            });
        }
        // Cap at 256: the ring structs carry fixed 256-entry arrays.
        let queue_size = max_size.min(256);

        crate::println!(
            "[VIRTIO-GPU] Control queue: max={}, using={}",
            max_size,
            queue_size
        );

        let (vq, dma, buffers) = self.allocate_virtqueue(queue_size)?;

        // NOTE(review): the virtual address is handed to the device as the
        // DMA address — assumes identity-mapped memory; confirm for this
        // platform.
        let desc_phys = dma.virt_addr as u64;
        let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
        // Avail ring is 6 + 2*qs bytes (flags, idx, ring, used_event); the
        // used ring is placed immediately after it.
        // NOTE(review): for even queue sizes this offset is only 2-byte
        // aligned, but the virtio spec requires 4-byte alignment for the used
        // ring — verify against the spec and fix the layout if needed.
        let used_offset = avail_offset + 6 + 2 * (queue_size as usize);
        let avail_phys = desc_phys + avail_offset as u64;
        let used_phys = desc_phys + used_offset as u64;

        // Program size and ring addresses, then mark the queue ready.
        self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);

        self.controlq = Some(vq);
        self.ctrl_dma = Some(dma);
        self.ctrl_buffers = buffers;

        Ok(())
    }
822
    /// Configure virtqueue 1 (the cursor queue). Unlike the control queue,
    /// a missing cursor queue is tolerated: the function returns `Ok(())`
    /// and leaves `cursorq` as `None`.
    fn setup_cursor_queue(&mut self) -> Result<(), KernelError> {
        // Select queue 1 before touching any queue registers.
        self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 1);

        let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
        if max_size == 0 {
            crate::println!("[VIRTIO-GPU] Cursor queue not available (max_size=0)");
            return Ok(());
        }
        // Cap at 256: the ring structs carry fixed 256-entry arrays.
        let queue_size = max_size.min(256);

        crate::println!(
            "[VIRTIO-GPU] Cursor queue: max={}, using={}",
            max_size,
            queue_size
        );

        let (vq, dma, buffers) = self.allocate_virtqueue(queue_size)?;

        // Same ring layout and caveats as setup_control_queue:
        // NOTE(review): virtual address used as DMA address (identity mapping
        // assumed), and the used-ring offset may violate the spec's 4-byte
        // alignment requirement for even queue sizes — verify.
        let desc_phys = dma.virt_addr as u64;
        let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
        let used_offset = avail_offset + 6 + 2 * (queue_size as usize);
        let avail_phys = desc_phys + avail_offset as u64;
        let used_phys = desc_phys + used_offset as u64;

        self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
        self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);

        self.cursorq = Some(vq);
        self.cursor_dma = Some(dma);
        self.cursor_buffers = buffers;

        Ok(())
    }
863
    /// Allocate the ring memory for one virtqueue plus one 4 KiB bounce
    /// buffer per descriptor slot.
    ///
    /// Layout within a single page-rounded allocation: descriptor table,
    /// then the available ring, then the used ring. All allocations are
    /// intentionally leaked via `mem::forget` — they are DMA targets that
    /// must outlive any Rust scope for the lifetime of the device.
    ///
    /// NOTE(review): two assumptions to confirm for this platform:
    /// 1) the backing `Vec<u8>` has alignment 1, but it is reinterpreted as
    ///    `VirtqDesc`/`VirtqAvail`/`VirtqUsed` which need 2-8 byte alignment;
    /// 2) virtual addresses double as DMA/physical addresses (identity map).
    fn allocate_virtqueue(
        &self,
        queue_size: u16,
    ) -> Result<(Virtqueue, VirtqueueDmaRegion, Vec<DataBuffer>), KernelError> {
        let qs = queue_size as usize;

        // Sizes per the split-ring formulas: 16 bytes per descriptor,
        // avail = 6 + 2*qs, used = 6 + 8*qs.
        let desc_size = qs * core::mem::size_of::<VirtqDesc>();
        let avail_size = 6 + 2 * qs;
        let used_size = 6 + 8 * qs;
        let total_ring_bytes = desc_size + avail_size + used_size;
        let ring_pages = total_ring_bytes.div_ceil(4096);

        // Zero-filled ring memory, leaked so the device can DMA forever.
        let ring_mem = alloc::vec![0u8; ring_pages * 4096];
        let ring_ptr = ring_mem.as_ptr() as usize;
        core::mem::forget(ring_mem);

        // Carve the three ring regions out of the allocation back-to-back.
        let desc_ptr = ring_ptr as *mut VirtqDesc;
        let avail_ptr = (ring_ptr + desc_size) as *mut VirtqAvail;
        let used_ptr = (ring_ptr + desc_size + avail_size) as *mut VirtqUsed;

        let descriptors = unsafe { core::slice::from_raw_parts_mut(desc_ptr, qs) };
        let avail = unsafe { &mut *avail_ptr };
        let used = unsafe { &mut *used_ptr };

        let vq = Virtqueue::new(descriptors, avail, used, queue_size);

        // One leaked 4 KiB bounce buffer per descriptor slot; phys == virt
        // under the identity-mapping assumption above.
        let mut data_buffers = Vec::with_capacity(qs);
        for _i in 0..qs {
            let buf = alloc::vec![0u8; 4096];
            let buf_virt = buf.as_ptr() as usize;
            let buf_phys = buf_virt as u64;
            core::mem::forget(buf);
            data_buffers.push(DataBuffer {
                virt_addr: buf_virt,
                phys_addr: buf_phys,
            });
        }

        let dma = VirtqueueDmaRegion {
            virt_addr: ring_ptr,
            num_pages: ring_pages,
        };

        Ok((vq, dma, data_buffers))
    }
924
    /// Submit one command on the control queue and busy-wait for completion.
    ///
    /// Copies `cmd_bytes` into the bounce buffer of a freshly allocated
    /// request descriptor, chains a device-writable response descriptor of
    /// `resp_len` bytes behind it, notifies the device, then spins until the
    /// used ring reports the chain.
    ///
    /// Returns the response header and the number of bytes the device wrote.
    ///
    /// # Errors
    /// - `HardwareError` (0x20) if the control queue was never set up.
    /// - `ResourceExhausted` when descriptors or bounce buffers run out.
    /// - `Timeout` after ~1,000,000 spin iterations (the 1000 ms in the
    ///   error is nominal — this is a spin count, not a measured duration).
    fn send_command_raw(
        &mut self,
        cmd_bytes: &[u8],
        resp_len: usize,
    ) -> Result<(VirtioGpuCtrlHdr, usize), KernelError> {
        let mmio = self.mmio_base;

        let controlq = self.controlq.as_mut().ok_or(KernelError::HardwareError {
            device: "virtio-gpu",
            code: 0x20,
        })?;

        let req_desc_idx = controlq
            .alloc_desc()
            .ok_or(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            })?;

        // If the second allocation fails, the closure releases the first
        // descriptor before producing the error.
        let resp_desc_idx = controlq.alloc_desc().ok_or_else(|| {
            controlq.free_desc(req_desc_idx);
            KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            }
        })?;

        // Bounce buffers are indexed by descriptor slot, so both indices must
        // be inside the buffer table.
        if (req_desc_idx as usize) < self.ctrl_buffers.len()
            && (resp_desc_idx as usize) < self.ctrl_buffers.len()
        {
            let req_buf_virt = self.ctrl_buffers[req_desc_idx as usize].virt_addr;
            let req_buf_phys = self.ctrl_buffers[req_desc_idx as usize].phys_addr;
            let resp_buf_virt = self.ctrl_buffers[resp_desc_idx as usize].virt_addr;
            let resp_buf_phys = self.ctrl_buffers[resp_desc_idx as usize].phys_addr;

            // Copy the command into the request bounce buffer (capped at the
            // 4 KiB buffer size).
            let req_slice =
                unsafe { core::slice::from_raw_parts_mut(req_buf_virt as *mut u8, 4096) };
            let copy_len = cmd_bytes.len().min(4096);
            req_slice[..copy_len].copy_from_slice(&cmd_bytes[..copy_len]);

            // Pre-zero the response area so stale data is never parsed.
            let resp_slice =
                unsafe { core::slice::from_raw_parts_mut(resp_buf_virt as *mut u8, 4096) };
            resp_slice[..resp_len.min(4096)].fill(0);

            // Descriptor chain: device-readable request -> device-writable
            // response.
            controlq.descriptors[req_desc_idx as usize] = VirtqDesc {
                addr: req_buf_phys,
                len: copy_len as u32,
                flags: VIRTQ_DESC_F_NEXT,
                next: resp_desc_idx,
            };

            controlq.descriptors[resp_desc_idx as usize] = VirtqDesc {
                addr: resp_buf_phys,
                len: resp_len.min(4096) as u32,
                flags: VIRTQ_DESC_F_WRITE,
                next: 0,
            };

            controlq.add_to_avail(req_desc_idx);

            // Kick queue 0.
            unsafe {
                core::ptr::write_volatile((mmio + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32, 0);
            }

            let mut timeout = 1_000_000u32;
            loop {
                if let Some((_used_id, used_len)) = controlq.get_used() {
                    // NOTE(review): the bounce buffer is only byte-aligned;
                    // this dereference assumes the platform tolerates an
                    // unaligned `VirtioGpuCtrlHdr` read — confirm, or switch
                    // to `read_unaligned`.
                    let resp_hdr = unsafe { *(resp_buf_virt as *const VirtioGpuCtrlHdr) };

                    controlq.free_desc(req_desc_idx);
                    controlq.free_desc(resp_desc_idx);

                    return Ok((resp_hdr, used_len as usize));
                }

                timeout -= 1;
                if timeout == 0 {
                    // Reclaim the descriptors even on timeout so the queue
                    // does not leak slots.
                    controlq.free_desc(req_desc_idx);
                    controlq.free_desc(resp_desc_idx);
                    return Err(KernelError::Timeout {
                        operation: "virtio_gpu_command",
                        duration_ms: 1000,
                    });
                }

                core::hint::spin_loop();
            }
        } else {
            controlq.free_desc(req_desc_idx);
            controlq.free_desc(resp_desc_idx);
            Err(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_buffers",
            })
        }
    }
1045
1046 fn send_simple_command<T: Sized>(&mut self, cmd: &T) -> Result<(), KernelError> {
1048 let cmd_bytes = unsafe {
1051 core::slice::from_raw_parts(cmd as *const T as *const u8, core::mem::size_of::<T>())
1052 };
1053
1054 let (resp_hdr, _len) =
1055 self.send_command_raw(cmd_bytes, core::mem::size_of::<VirtioGpuCtrlHdr>())?;
1056
1057 if resp_hdr.hdr_type != VIRTIO_GPU_RESP_OK_NODATA {
1058 return Err(Self::response_to_error(resp_hdr.hdr_type));
1059 }
1060
1061 Ok(())
1062 }
1063
1064 fn response_to_error(resp_type: u32) -> KernelError {
1066 match resp_type {
1067 VIRTIO_GPU_RESP_ERR_UNSPEC => KernelError::HardwareError {
1068 device: "virtio-gpu",
1069 code: 0x1200,
1070 },
1071 VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY => KernelError::OutOfMemory {
1072 requested: 0,
1073 available: 0,
1074 },
1075 VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID => KernelError::InvalidArgument {
1076 name: "scanout_id",
1077 value: "invalid",
1078 },
1079 VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID => KernelError::InvalidArgument {
1080 name: "resource_id",
1081 value: "invalid",
1082 },
1083 VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID => KernelError::InvalidArgument {
1084 name: "context_id",
1085 value: "invalid",
1086 },
1087 VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER => KernelError::InvalidArgument {
1088 name: "parameter",
1089 value: "invalid",
1090 },
1091 _ => KernelError::HardwareError {
1092 device: "virtio-gpu",
1093 code: resp_type,
1094 },
1095 }
1096 }
1097
1098 pub fn get_display_info(&mut self) -> Result<VirtioGpuDisplayOne, KernelError> {
1104 let cmd = VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
1105 let cmd_bytes = unsafe {
1107 core::slice::from_raw_parts(
1108 &cmd as *const VirtioGpuCtrlHdr as *const u8,
1109 core::mem::size_of::<VirtioGpuCtrlHdr>(),
1110 )
1111 };
1112
1113 let resp_size = core::mem::size_of::<VirtioGpuRespDisplayInfo>();
1114 let (resp_hdr, _len) = self.send_command_raw(cmd_bytes, resp_size)?;
1115
1116 if resp_hdr.hdr_type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO {
1117 return Err(Self::response_to_error(resp_hdr.hdr_type));
1118 }
1119
1120 self.get_display_info_internal()
1139 }
1140
    /// Issue GET_DISPLAY_INFO and parse the full multi-scanout response.
    ///
    /// This duplicates the descriptor/notify/poll machinery of
    /// `send_command_raw` because that helper only returns the response
    /// header, while this command needs the whole
    /// `VirtioGpuRespDisplayInfo` payload.
    ///
    /// Selection order: first enabled scanout; else scanout 0 if it reports
    /// a nonzero size; else a synthetic 1024x768 mode so boot can proceed.
    fn get_display_info_internal(&mut self) -> Result<VirtioGpuDisplayOne, KernelError> {
        let cmd = VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        // View the header struct as raw bytes for the queue.
        let cmd_bytes = unsafe {
            core::slice::from_raw_parts(
                &cmd as *const VirtioGpuCtrlHdr as *const u8,
                core::mem::size_of::<VirtioGpuCtrlHdr>(),
            )
        };

        let resp_size = core::mem::size_of::<VirtioGpuRespDisplayInfo>();
        let mmio = self.mmio_base;

        let controlq = self.controlq.as_mut().ok_or(KernelError::HardwareError {
            device: "virtio-gpu",
            code: 0x20,
        })?;

        let req_desc_idx = controlq
            .alloc_desc()
            .ok_or(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            })?;

        // Release the first descriptor if the second cannot be allocated.
        let resp_desc_idx = controlq.alloc_desc().ok_or_else(|| {
            controlq.free_desc(req_desc_idx);
            KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            }
        })?;

        // Bounce buffers are indexed by descriptor slot.
        if (req_desc_idx as usize) >= self.ctrl_buffers.len()
            || (resp_desc_idx as usize) >= self.ctrl_buffers.len()
        {
            controlq.free_desc(req_desc_idx);
            controlq.free_desc(resp_desc_idx);
            return Err(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_buffers",
            });
        }

        let req_buf_virt = self.ctrl_buffers[req_desc_idx as usize].virt_addr;
        let req_buf_phys = self.ctrl_buffers[req_desc_idx as usize].phys_addr;
        let resp_buf_virt = self.ctrl_buffers[resp_desc_idx as usize].virt_addr;
        let resp_buf_phys = self.ctrl_buffers[resp_desc_idx as usize].phys_addr;

        // Copy the command in and pre-zero the response area.
        let req_slice = unsafe { core::slice::from_raw_parts_mut(req_buf_virt as *mut u8, 4096) };
        let copy_len = cmd_bytes.len().min(4096);
        req_slice[..copy_len].copy_from_slice(&cmd_bytes[..copy_len]);

        let resp_slice = unsafe { core::slice::from_raw_parts_mut(resp_buf_virt as *mut u8, 4096) };
        resp_slice[..resp_size.min(4096)].fill(0);

        // Chain: device-readable request -> device-writable response.
        controlq.descriptors[req_desc_idx as usize] = VirtqDesc {
            addr: req_buf_phys,
            len: copy_len as u32,
            flags: VIRTQ_DESC_F_NEXT,
            next: resp_desc_idx,
        };
        controlq.descriptors[resp_desc_idx as usize] = VirtqDesc {
            addr: resp_buf_phys,
            len: resp_size.min(4096) as u32,
            flags: VIRTQ_DESC_F_WRITE,
            next: 0,
        };

        controlq.add_to_avail(req_desc_idx);

        // Kick queue 0.
        unsafe {
            core::ptr::write_volatile((mmio + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32, 0);
        }

        // Spin-count timeout, same caveat as send_command_raw.
        let mut timeout = 1_000_000u32;
        loop {
            if let Some((_used_id, _used_len)) = controlq.get_used() {
                // NOTE(review): the bounce buffer is only byte-aligned; this
                // dereference assumes unaligned struct reads are tolerated —
                // confirm, or use `read_unaligned`.
                let resp = unsafe { *(resp_buf_virt as *const VirtioGpuRespDisplayInfo) };

                controlq.free_desc(req_desc_idx);
                controlq.free_desc(resp_desc_idx);

                if resp.hdr.hdr_type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO {
                    return Err(Self::response_to_error(resp.hdr.hdr_type));
                }

                // Prefer the first scanout the host marked enabled.
                for i in 0..VIRTIO_GPU_MAX_SCANOUTS {
                    if resp.pmodes[i].enabled != 0 {
                        return Ok(resp.pmodes[i]);
                    }
                }

                // Otherwise accept scanout 0 if it carries a real size.
                if resp.pmodes[0].rect.width > 0 && resp.pmodes[0].rect.height > 0 {
                    return Ok(resp.pmodes[0]);
                }

                // Last resort: synthesize a 1024x768 mode.
                return Ok(VirtioGpuDisplayOne {
                    rect: VirtioGpuRect {
                        x: 0,
                        y: 0,
                        width: 1024,
                        height: 768,
                    },
                    enabled: 1,
                    flags: 0,
                });
            }

            timeout -= 1;
            if timeout == 0 {
                controlq.free_desc(req_desc_idx);
                controlq.free_desc(resp_desc_idx);
                return Err(KernelError::Timeout {
                    operation: "virtio_gpu_get_display_info",
                    duration_ms: 1000,
                });
            }

            core::hint::spin_loop();
        }
    }
1276
1277 pub fn create_resource_2d(
1279 &mut self,
1280 resource_id: u32,
1281 format: u32,
1282 width: u32,
1283 height: u32,
1284 ) -> Result<(), KernelError> {
1285 let cmd = VirtioGpuResourceCreate2d {
1286 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
1287 resource_id,
1288 format,
1289 width,
1290 height,
1291 };
1292
1293 self.send_simple_command(&cmd)?;
1294
1295 crate::println!(
1296 "[VIRTIO-GPU] Created 2D resource {} ({}x{}, format={})",
1297 resource_id,
1298 width,
1299 height,
1300 format
1301 );
1302
1303 Ok(())
1304 }
1305
1306 pub fn resource_unref(&mut self, resource_id: u32) -> Result<(), KernelError> {
1308 let cmd = VirtioGpuResourceUnref {
1309 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_UNREF),
1310 resource_id,
1311 padding: 0,
1312 };
1313
1314 self.send_simple_command(&cmd)?;
1315
1316 crate::println!("[VIRTIO-GPU] Destroyed resource {}", resource_id);
1317 Ok(())
1318 }
1319
1320 pub fn attach_backing(
1325 &mut self,
1326 resource_id: u32,
1327 addr: u64,
1328 length: u32,
1329 ) -> Result<(), KernelError> {
1330 #[repr(C)]
1333 #[derive(Clone, Copy)]
1334 struct AttachBackingWithEntry {
1335 cmd: VirtioGpuResourceAttachBacking,
1336 entry: VirtioGpuMemEntry,
1337 }
1338
1339 let combined = AttachBackingWithEntry {
1340 cmd: VirtioGpuResourceAttachBacking {
1341 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
1342 resource_id,
1343 nr_entries: 1,
1344 },
1345 entry: VirtioGpuMemEntry {
1346 addr,
1347 length,
1348 padding: 0,
1349 },
1350 };
1351
1352 let cmd_bytes = unsafe {
1355 core::slice::from_raw_parts(
1356 &combined as *const AttachBackingWithEntry as *const u8,
1357 core::mem::size_of::<AttachBackingWithEntry>(),
1358 )
1359 };
1360
1361 let (resp_hdr, _len) =
1362 self.send_command_raw(cmd_bytes, core::mem::size_of::<VirtioGpuCtrlHdr>())?;
1363
1364 if resp_hdr.hdr_type != VIRTIO_GPU_RESP_OK_NODATA {
1365 return Err(Self::response_to_error(resp_hdr.hdr_type));
1366 }
1367
1368 crate::println!(
1369 "[VIRTIO-GPU] Attached backing for resource {} (addr={:#x}, len={})",
1370 resource_id,
1371 addr,
1372 length
1373 );
1374
1375 Ok(())
1376 }
1377
1378 pub fn detach_backing(&mut self, resource_id: u32) -> Result<(), KernelError> {
1380 let cmd = VirtioGpuResourceDetachBacking {
1381 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
1382 resource_id,
1383 padding: 0,
1384 };
1385
1386 self.send_simple_command(&cmd)
1387 }
1388
1389 pub fn set_scanout(
1391 &mut self,
1392 scanout_id: u32,
1393 resource_id: u32,
1394 rect: VirtioGpuRect,
1395 ) -> Result<(), KernelError> {
1396 let cmd = VirtioGpuSetScanout {
1397 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_SET_SCANOUT),
1398 rect,
1399 scanout_id,
1400 resource_id,
1401 };
1402
1403 self.send_simple_command(&cmd)?;
1404
1405 crate::println!(
1406 "[VIRTIO-GPU] Set scanout {}: resource {} ({}x{}+{}+{})",
1407 scanout_id,
1408 resource_id,
1409 rect.width,
1410 rect.height,
1411 rect.x,
1412 rect.y
1413 );
1414
1415 Ok(())
1416 }
1417
1418 pub fn transfer_to_host_2d(
1420 &mut self,
1421 resource_id: u32,
1422 rect: VirtioGpuRect,
1423 ) -> Result<(), KernelError> {
1424 let cmd = VirtioGpuTransferToHost2d {
1425 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
1426 rect,
1427 offset: 0,
1428 resource_id,
1429 padding: 0,
1430 };
1431
1432 self.send_simple_command(&cmd)
1433 }
1434
1435 pub fn resource_flush(
1437 &mut self,
1438 resource_id: u32,
1439 rect: VirtioGpuRect,
1440 ) -> Result<(), KernelError> {
1441 let cmd = VirtioGpuResourceFlush {
1442 hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
1443 rect,
1444 resource_id,
1445 padding: 0,
1446 };
1447
1448 self.send_simple_command(&cmd)
1449 }
1450
    /// Create a full-screen 2D resource, attach a guest pixel buffer to it,
    /// bind it to scanout 0 and push an initial test gradient to the host.
    ///
    /// # Errors
    /// `InvalidState` when no display dimensions are known yet; otherwise
    /// propagates any command failure.
    pub fn setup_framebuffer(&mut self) -> Result<(), KernelError> {
        if self.width == 0 || self.height == 0 {
            return Err(KernelError::InvalidState {
                expected: "display_configured",
                actual: "no_display",
            });
        }

        let resource_id = self.next_resource_id;
        self.next_resource_id += 1;

        self.create_resource_2d(resource_id, FORMAT_B8G8R8X8_UNORM, self.width, self.height)?;

        // One u32 per pixel.
        let pixel_count = (self.width * self.height) as usize;
        let mut backing = alloc::vec![0u8; 0]; // placeholder comment removed below
        let mut backing = alloc::vec![0u32; pixel_count];

        // Paint a vertical gradient so a working display is visually obvious.
        // NOTE(review): the `blue`/`green` names describe intent, but which
        // byte lane each shift lands in depends on the negotiated format
        // constant (see the FORMAT_* note) — confirm the channel order.
        for y in 0..self.height {
            for x in 0..self.width {
                let idx = (y * self.width + x) as usize;
                let blue = 32 + (y * 64 / self.height);
                let green = 16 + (y * 32 / self.height);
                backing[idx] = (blue << 16) | (green << 8) | 0x10;
            }
        }

        // NOTE(review): the buffer's virtual address is handed to the device
        // as the DMA address — identity mapping assumed, as elsewhere.
        let backing_addr = backing.as_ptr() as u64;
        let backing_len = (pixel_count * 4) as u32;

        self.attach_backing(resource_id, backing_addr, backing_len)?;

        let scanout_rect = VirtioGpuRect::new(0, 0, self.width, self.height);
        self.set_scanout(0, resource_id, scanout_rect)?;

        self.transfer_to_host_2d(resource_id, scanout_rect)?;

        self.resource_flush(resource_id, scanout_rect)?;

        self.framebuffer_resource_id = resource_id;
        // Moving the Vec into `self` keeps its heap buffer (and thus the
        // address given to the device) alive and unchanged.
        self.framebuffer_backing = Some(backing);

        crate::println!(
            "[VIRTIO-GPU] Framebuffer ready: {}x{} (resource {})",
            self.width,
            self.height,
            resource_id
        );

        Ok(())
    }
1513
1514 pub fn flush_framebuffer(&mut self) -> Result<(), KernelError> {
1519 if self.framebuffer_resource_id == 0 {
1520 return Err(KernelError::InvalidState {
1521 expected: "framebuffer_setup",
1522 actual: "no_framebuffer",
1523 });
1524 }
1525
1526 let rect = VirtioGpuRect::new(0, 0, self.width, self.height);
1527 self.transfer_to_host_2d(self.framebuffer_resource_id, rect)?;
1528 self.resource_flush(self.framebuffer_resource_id, rect)?;
1529
1530 Ok(())
1531 }
1532
1533 pub fn flush_region(&mut self, rect: VirtioGpuRect) -> Result<(), KernelError> {
1538 if self.framebuffer_resource_id == 0 {
1539 return Err(KernelError::InvalidState {
1540 expected: "framebuffer_setup",
1541 actual: "no_framebuffer",
1542 });
1543 }
1544
1545 self.transfer_to_host_2d(self.framebuffer_resource_id, rect)?;
1546 self.resource_flush(self.framebuffer_resource_id, rect)?;
1547
1548 Ok(())
1549 }
1550
1551 pub fn get_framebuffer_mut(&mut self) -> Option<&mut [u32]> {
1557 self.framebuffer_backing.as_deref_mut()
1558 }
1559
1560 pub fn get_framebuffer(&self) -> Option<&[u32]> {
1562 self.framebuffer_backing.as_deref()
1563 }
1564
    /// Display width in pixels.
    pub fn width(&self) -> u32 {
        self.width
    }
1569
    /// Display height in pixels.
    pub fn height(&self) -> u32 {
        self.height
    }
1574
    /// Whether the device has reached the `Ready` state.
    pub fn is_ready(&self) -> bool {
        self.state == GpuDeviceState::Ready
    }
1579
    /// Whether the EDID feature bit (`VIRTIO_GPU_F_EDID`) is set in the
    /// device feature word recorded during init.
    pub fn supports_edid(&self) -> bool {
        self.features & VIRTIO_GPU_F_EDID != 0
    }
1584
    /// Whether the VirGL (3D) feature bit (`VIRTIO_GPU_F_VIRGL`) is set in
    /// the device feature word recorded during init.
    pub fn supports_virgl(&self) -> bool {
        self.features & VIRTIO_GPU_F_VIRGL != 0
    }
1589
    /// Resource id of the active framebuffer; 0 means no framebuffer has
    /// been set up yet (see `flush_framebuffer`'s guard).
    pub fn framebuffer_resource_id(&self) -> u32 {
        self.framebuffer_resource_id
    }
1594
1595 pub fn alloc_resource_id(&mut self) -> u32 {
1597 let id = self.next_resource_id;
1598 self.next_resource_id += 1;
1599 id
1600 }
1601
1602 pub fn set_pixel(&mut self, x: u32, y: u32, color: u32) -> Result<(), KernelError> {
1607 if x >= self.width || y >= self.height {
1608 return Err(KernelError::InvalidArgument {
1609 name: "coordinates",
1610 value: "out_of_bounds",
1611 });
1612 }
1613
1614 if let Some(ref mut fb) = self.framebuffer_backing {
1615 let idx = (y * self.width + x) as usize;
1616 if idx < fb.len() {
1617 fb[idx] = color;
1618 }
1619 }
1620
1621 Ok(())
1622 }
1623
1624 pub fn fill_rect(
1628 &mut self,
1629 x: u32,
1630 y: u32,
1631 w: u32,
1632 h: u32,
1633 color: u32,
1634 ) -> Result<(), KernelError> {
1635 if let Some(ref mut fb) = self.framebuffer_backing {
1636 let width = self.width;
1637 let height = self.height;
1638
1639 for dy in 0..h {
1640 let row_y = y + dy;
1641 if row_y >= height {
1642 break;
1643 }
1644 for dx in 0..w {
1645 let col_x = x + dx;
1646 if col_x >= width {
1647 break;
1648 }
1649 let idx = (row_y * width + col_x) as usize;
1650 if idx < fb.len() {
1651 fb[idx] = color;
1652 }
1653 }
1654 }
1655 }
1656
1657 Ok(())
1658 }
1659
1660 pub fn blit(
1665 &mut self,
1666 buffer: &[u32],
1667 x: u32,
1668 y: u32,
1669 w: u32,
1670 h: u32,
1671 ) -> Result<(), KernelError> {
1672 if let Some(ref mut fb) = self.framebuffer_backing {
1673 let fb_width = self.width;
1674 let fb_height = self.height;
1675
1676 for dy in 0..h {
1677 let row_y = y + dy;
1678 if row_y >= fb_height {
1679 break;
1680 }
1681 for dx in 0..w {
1682 let col_x = x + dx;
1683 if col_x >= fb_width {
1684 break;
1685 }
1686 let src_idx = (dy * w + dx) as usize;
1687 let dst_idx = (row_y * fb_width + col_x) as usize;
1688 if src_idx < buffer.len() && dst_idx < fb.len() {
1689 fb[dst_idx] = buffer[src_idx];
1690 }
1691 }
1692 }
1693 }
1694
1695 Ok(())
1696 }
1697
1698 pub fn clear(&mut self, color: u32) {
1702 if let Some(ref mut fb) = self.framebuffer_backing {
1703 fb.fill(color);
1704 }
1705 }
1706}
1707
1708#[cfg(target_arch = "x86_64")]
1716pub fn probe_pci() -> Option<usize> {
1717 if !crate::drivers::pci::is_pci_initialized() {
1718 return None;
1719 }
1720
1721 let bus = crate::drivers::pci::get_pci_bus().lock();
1722
1723 let devices = bus.find_devices_by_id(0x1AF4, 0x1050);
1725
1726 if let Some(dev) = devices.first() {
1727 crate::println!(
1728 "[VIRTIO-GPU] Found PCI device {:04x}:{:04x} at {:02x}:{:02x}.{}",
1729 dev.vendor_id,
1730 dev.device_id,
1731 dev.location.bus,
1732 dev.location.device,
1733 dev.location.function
1734 );
1735
1736 if let Some(bar) = dev.bars.first() {
1738 if let Some(phys_addr) = bar.get_memory_address() {
1739 crate::println!("[VIRTIO-GPU] BAR0 physical address: {:#x}", phys_addr);
1740 return crate::arch::x86_64::msr::phys_to_virt(phys_addr as usize);
1742 }
1743 }
1744
1745 crate::println!("[VIRTIO-GPU] No usable BAR found");
1746 }
1747
1748 let legacy_devices = bus.find_devices_by_id(0x1AF4, 0x1040);
1751 for dev in &legacy_devices {
1752 crate::println!(
1754 "[VIRTIO-GPU] Found transitional VirtIO PCI device {:04x}:{:04x}",
1755 dev.vendor_id,
1756 dev.device_id
1757 );
1758
1759 if let Some(bar) = dev.bars.first() {
1760 if let Some(phys_addr) = bar.get_memory_address() {
1761 return crate::arch::x86_64::msr::phys_to_virt(phys_addr as usize);
1762 }
1763 }
1764 }
1765
1766 None
1767}
1768
/// PCI probing is only implemented for x86_64; other targets report no device.
#[cfg(not(target_arch = "x86_64"))]
pub fn probe_pci() -> Option<usize> {
    None
}
1774
1775#[cfg(target_arch = "x86_64")]
1777pub fn enumerate_gpu_devices() -> Vec<(u16, u16, u8, u8)> {
1778 let mut result = Vec::new();
1779
1780 if !crate::drivers::pci::is_pci_initialized() {
1781 return result;
1782 }
1783
1784 let bus = crate::drivers::pci::get_pci_bus().lock();
1785 let display_devices = bus.find_devices_by_class(crate::drivers::pci::class_codes::DISPLAY);
1786
1787 for dev in &display_devices {
1788 result.push((dev.vendor_id, dev.device_id, dev.class_code, dev.subclass));
1789 }
1790
1791 result
1792}
1793
/// PCI enumeration is only implemented for x86_64; other targets report
/// no display devices.
#[cfg(not(target_arch = "x86_64"))]
pub fn enumerate_gpu_devices() -> Vec<(u16, u16, u8, u8)> {
    Vec::new()
}
1799
/// Global driver instance, populated by `init` when a device is found.
static VIRTIO_GPU: spin::Mutex<Option<VirtioGpuDriver>> = spin::Mutex::new(None);
1806
1807pub fn init() -> Result<(), KernelError> {
1812 crate::println!("[VIRTIO-GPU] Probing for virtio-gpu device...");
1813
1814 if let Some(mmio_base) = probe_pci() {
1816 crate::println!("[VIRTIO-GPU] MMIO base: {:#x}", mmio_base);
1817
1818 match VirtioGpuDriver::new(mmio_base) {
1819 Ok(driver) => {
1820 crate::println!(
1821 "[VIRTIO-GPU] Driver initialized: {}x{} (resource {})",
1822 driver.width(),
1823 driver.height(),
1824 driver.framebuffer_resource_id()
1825 );
1826 *VIRTIO_GPU.lock() = Some(driver);
1827 return Ok(());
1828 }
1829 Err(e) => {
1830 crate::println!("[VIRTIO-GPU] Init failed: {:?}", e);
1831 return Err(e);
1832 }
1833 }
1834 }
1835
1836 crate::println!("[VIRTIO-GPU] No virtio-gpu device found");
1837 Ok(())
1838}
1839
1840pub fn with_driver<R, F: FnOnce(&mut VirtioGpuDriver) -> R>(f: F) -> Option<R> {
1842 VIRTIO_GPU.lock().as_mut().map(f)
1843}
1844
1845pub fn is_available() -> bool {
1847 VIRTIO_GPU.lock().is_some()
1848}
1849
1850pub fn flush_framebuffer() -> Result<(), KernelError> {
1854 if let Some(ref mut driver) = *VIRTIO_GPU.lock() {
1855 driver.flush_framebuffer()
1856 } else {
1857 Err(KernelError::InvalidState {
1858 expected: "virtio_gpu_initialized",
1859 actual: "no_driver",
1860 })
1861 }
1862}
1863
1864pub fn get_display_size() -> Option<(u32, u32)> {
1866 VIRTIO_GPU.lock().as_ref().map(|d| (d.width(), d.height()))
1867}
1868
#[cfg(test)]
mod tests {
    use super::*;
    use core::mem::size_of;

    /// Command/response/format constants must match the VirtIO GPU spec values.
    #[test]
    fn test_gpu_constants() {
        assert_eq!(VIRTIO_GPU_CMD_GET_DISPLAY_INFO, 0x100);
        assert_eq!(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, 0x101);
        assert_eq!(VIRTIO_GPU_RESP_OK_NODATA, 0x1100);
        assert_eq!(VIRTIO_GPU_RESP_ERR_UNSPEC, 0x1200);
        assert_eq!(FORMAT_B8G8R8A8_UNORM, 1);
    }

    /// Wire-format header size must match the device ABI.
    #[test]
    fn test_ctrl_hdr_size() {
        assert_eq!(size_of::<VirtioGpuCtrlHdr>(), 24);
    }

    #[test]
    fn test_rect() {
        let r = VirtioGpuRect::new(10, 20, 800, 600);
        assert_eq!((r.x, r.y, r.width, r.height), (10, 20, 800, 600));
    }

    #[test]
    fn test_display_one_size() {
        assert_eq!(size_of::<VirtioGpuDisplayOne>(), 24);
    }

    #[test]
    fn test_resource_create_2d_size() {
        assert_eq!(size_of::<VirtioGpuResourceCreate2d>(), 40);
    }

    #[test]
    fn test_mem_entry_size() {
        assert_eq!(size_of::<VirtioGpuMemEntry>(), 16);
    }

    /// Error responses map onto the matching KernelError variants.
    #[test]
    fn test_response_to_error() {
        let oom = VirtioGpuDriver::response_to_error(VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY);
        match oom {
            KernelError::OutOfMemory { .. } => {}
            _ => panic!("Expected OutOfMemory error"),
        }

        let bad_scanout = VirtioGpuDriver::response_to_error(VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID);
        match bad_scanout {
            KernelError::InvalidArgument { name, .. } => assert_eq!(name, "scanout_id"),
            _ => panic!("Expected InvalidArgument error"),
        }
    }
}