⚠️ VeridianOS Kernel Documentation - This is low-level kernel code. All functions are unsafe unless explicitly marked otherwise. no_std

veridian_kernel/drivers/
virtio_gpu.rs

1//! VirtIO GPU Driver
2//!
3//! Driver for paravirtualized GPU devices using the VirtIO protocol.
4//! Commonly used in QEMU/KVM virtual machines for 2D/3D rendering.
5//!
6//! Implements the VirtIO MMIO transport with proper status negotiation,
7//! virtqueue setup via frame allocator DMA buffers, and 2D rendering
8//! operations (resource creation, scanout, transfer, flush).
9//!
10//! ## Architecture
11//!
12//! The driver uses the VirtIO 1.0+ modern MMIO transport interface with
13//! two virtqueues:
14//! - **controlq** (queue 0): All 2D/3D commands and responses
15//! - **cursorq** (queue 1): Hardware cursor updates (optional)
16//!
17//! ## Supported Operations
18//!
19//! - Display info query (GET_DISPLAY_INFO)
20//! - 2D resource creation (RESOURCE_CREATE_2D)
21//! - Backing store attachment (RESOURCE_ATTACH_BACKING)
22//! - Scanout configuration (SET_SCANOUT)
23//! - Host transfer (TRANSFER_TO_HOST_2D)
24//! - Display flush (RESOURCE_FLUSH)
25//! - EDID query (GET_EDID, if supported)
26
27// Allow dead code for VirtIO GPU protocol constants, structures, and methods
28// not yet fully exercised by callers during Phase 7 bringup.
29#![allow(dead_code, clippy::needless_range_loop)]
30
31use alloc::vec::Vec;
32
33use crate::error::KernelError;
34
35// ============================================================================
36// VirtIO GPU Protocol Constants
37// ============================================================================
38
// --- Command types ---
// Numeric values follow the VirtIO spec's virtio-gpu command enumeration;
// 2D control commands occupy the 0x010x range.

/// Get display info (returns display modes for all scanouts)
const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x100;
/// Create a 2D resource (host-side texture)
const VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: u32 = 0x101;
/// Destroy a 2D resource
const VIRTIO_GPU_CMD_RESOURCE_UNREF: u32 = 0x102;
/// Set scanout (bind resource to display output)
const VIRTIO_GPU_CMD_SET_SCANOUT: u32 = 0x103;
/// Flush resource to display
const VIRTIO_GPU_CMD_RESOURCE_FLUSH: u32 = 0x104;
/// Transfer data from guest to host resource
const VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: u32 = 0x105;
/// Attach backing store pages to a resource
const VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: u32 = 0x106;
/// Detach backing store from a resource
const VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: u32 = 0x107;
/// Get capability set info (3D)
const VIRTIO_GPU_CMD_GET_CAPSET_INFO: u32 = 0x108;
/// Get capability set data (3D)
const VIRTIO_GPU_CMD_GET_CAPSET: u32 = 0x109;
/// Get EDID data for a scanout
const VIRTIO_GPU_CMD_GET_EDID: u32 = 0x10A;

// --- Response types ---
// Success responses are 0x110x, error responses 0x120x.

/// Success, no data payload
const VIRTIO_GPU_RESP_OK_NODATA: u32 = 0x1100;
/// Success, display info payload
const VIRTIO_GPU_RESP_OK_DISPLAY_INFO: u32 = 0x1101;
/// Success, capset info payload
const VIRTIO_GPU_RESP_OK_CAPSET_INFO: u32 = 0x1102;
/// Success, capset data payload
const VIRTIO_GPU_RESP_OK_CAPSET: u32 = 0x1103;
/// Success, EDID data payload
const VIRTIO_GPU_RESP_OK_EDID: u32 = 0x1104;

/// Error: unspecified
const VIRTIO_GPU_RESP_ERR_UNSPEC: u32 = 0x1200;
/// Error: out of memory on host
const VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY: u32 = 0x1201;
/// Error: invalid scanout ID
const VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID: u32 = 0x1202;
/// Error: invalid resource ID
const VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID: u32 = 0x1203;
/// Error: invalid context ID
const VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID: u32 = 0x1204;
/// Error: invalid parameter
const VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER: u32 = 0x1205;
89
// --- Pixel formats ---
// Values come from the VirtIO GPU spec's `virtio_gpu_formats` enumeration.

/// B8G8R8A8 (BGRA with alpha, native for many displays)
const FORMAT_B8G8R8A8_UNORM: u32 = 1;
/// R8G8B8A8 (RGBA)
const FORMAT_R8G8B8A8_UNORM: u32 = 67;
/// B8G8R8X8 (BGRX, alpha ignored)
///
/// Fixed: the spec value for B8G8R8X8_UNORM is 2. The previous value 68
/// is X8B8G8R8_UNORM, which would have produced swapped color channels.
const FORMAT_B8G8R8X8_UNORM: u32 = 2;
/// R8G8B8X8 (RGBX, alpha ignored)
const FORMAT_R8G8B8X8_UNORM: u32 = 134;
100
// --- Feature bits ---
// Device-specific feature bits offered in the DEVICE_FEATURES register;
// this driver only accepts EDID (see `initialize`).

/// Device supports 3D (virgl) commands
const VIRTIO_GPU_F_VIRGL: u64 = 1 << 0;
/// Device supports EDID queries
const VIRTIO_GPU_F_EDID: u64 = 1 << 1;

// --- Max scanouts per the spec ---
const VIRTIO_GPU_MAX_SCANOUTS: usize = 16;
110
111// ============================================================================
112// VirtIO MMIO Register Offsets (modern interface, matches virtio_net.rs)
113// ============================================================================
114
115const VIRTIO_MMIO_MAGIC: usize = 0x00;
116const VIRTIO_MMIO_VERSION: usize = 0x04;
117const VIRTIO_MMIO_DEVICE_ID: usize = 0x08;
118const VIRTIO_MMIO_DEVICE_FEATURES: usize = 0x10;
119const VIRTIO_MMIO_DEVICE_FEATURES_SEL: usize = 0x14;
120const VIRTIO_MMIO_DRIVER_FEATURES: usize = 0x20;
121const VIRTIO_MMIO_DRIVER_FEATURES_SEL: usize = 0x24;
122const VIRTIO_MMIO_QUEUE_SEL: usize = 0x30;
123const VIRTIO_MMIO_QUEUE_NUM_MAX: usize = 0x34;
124const VIRTIO_MMIO_QUEUE_NUM: usize = 0x38;
125const VIRTIO_MMIO_QUEUE_READY: usize = 0x44;
126const VIRTIO_MMIO_QUEUE_NOTIFY: usize = 0x50;
127const VIRTIO_MMIO_STATUS: usize = 0x70;
128const VIRTIO_MMIO_QUEUE_DESC_LOW: usize = 0x80;
129const VIRTIO_MMIO_QUEUE_DESC_HIGH: usize = 0x84;
130const VIRTIO_MMIO_QUEUE_AVAIL_LOW: usize = 0x90;
131const VIRTIO_MMIO_QUEUE_AVAIL_HIGH: usize = 0x94;
132const VIRTIO_MMIO_QUEUE_USED_LOW: usize = 0xA0;
133const VIRTIO_MMIO_QUEUE_USED_HIGH: usize = 0xA4;
134const VIRTIO_MMIO_CONFIG_BASE: usize = 0x100;
135
136// VirtIO status bits
137const VIRTIO_STATUS_ACKNOWLEDGE: u32 = 1;
138const VIRTIO_STATUS_DRIVER: u32 = 2;
139const VIRTIO_STATUS_DRIVER_OK: u32 = 4;
140const VIRTIO_STATUS_FEATURES_OK: u32 = 8;
141
142/// Descriptor flags: next descriptor exists (chained)
143const VIRTQ_DESC_F_NEXT: u16 = 1;
144/// Descriptor flags: buffer is device-writable
145const VIRTQ_DESC_F_WRITE: u16 = 2;
146
147// ============================================================================
148// VirtIO GPU Protocol Structures
149// ============================================================================
150
/// VirtIO GPU control header -- common prefix for all commands and responses.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuCtrlHdr {
    /// Command or response type
    hdr_type: u32,
    /// Flags (e.g. VIRTIO_GPU_FLAG_FENCE)
    flags: u32,
    /// Fence ID for synchronization
    fence_id: u64,
    /// 3D rendering context ID (0 for 2D)
    ctx_id: u32,
    /// Ring index (virtio-gpu multi-queue extension)
    ring_idx: u8,
    /// Padding to maintain alignment
    padding: [u8; 3],
}

impl VirtioGpuCtrlHdr {
    /// Build a command header carrying `hdr_type`; every other field
    /// (flags, fence, context, ring index, padding) is zeroed.
    fn new(hdr_type: u32) -> Self {
        Self {
            flags: 0,
            fence_id: 0,
            ctx_id: 0,
            ring_idx: 0,
            padding: [0; 3],
            hdr_type,
        }
    }
}
182
/// Rectangle structure for GPU commands.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VirtioGpuRect {
    /// X coordinate
    pub x: u32,
    /// Y coordinate
    pub y: u32,
    /// Width in pixels
    pub width: u32,
    /// Height in pixels
    pub height: u32,
}

impl VirtioGpuRect {
    /// Construct a rectangle positioned at (`x`, `y`) with the given
    /// `width` and `height`.
    pub fn new(x: u32, y: u32, width: u32, height: u32) -> Self {
        Self { height, width, y, x }
    }
}
208
/// Display mode information for one scanout.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VirtioGpuDisplayOne {
    /// Active display rectangle (position and size)
    rect: VirtioGpuRect,
    /// Whether this scanout is enabled (boolean carried as u32; the driver
    /// logs it verbatim -- presumably nonzero means enabled, per spec)
    enabled: u32,
    /// Scanout flags
    flags: u32,
}

/// Response to GET_DISPLAY_INFO command.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuRespDisplayInfo {
    /// Response header
    hdr: VirtioGpuCtrlHdr,
    /// Display modes for up to 16 scanouts
    pmodes: [VirtioGpuDisplayOne; VIRTIO_GPU_MAX_SCANOUTS],
}
230
/// RESOURCE_CREATE_2D command structure.
///
/// Creates a host-side 2D resource; the guest later attaches backing
/// pages and transfers pixel data into it.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceCreate2d {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Unique resource identifier
    resource_id: u32,
    /// Pixel format (FORMAT_B8G8R8A8_UNORM etc.)
    format: u32,
    /// Width in pixels
    width: u32,
    /// Height in pixels
    height: u32,
}

/// RESOURCE_UNREF command structure.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceUnref {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Resource to destroy
    resource_id: u32,
    /// Padding
    padding: u32,
}

/// RESOURCE_ATTACH_BACKING command structure.
///
/// Followed immediately in the descriptor by `nr_entries` VirtioGpuMemEntry
/// elements.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceAttachBacking {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Resource to attach backing to
    resource_id: u32,
    /// Number of memory entries following this struct
    nr_entries: u32,
}

/// A single memory entry for RESOURCE_ATTACH_BACKING.
///
/// Describes one physically contiguous run of guest backing pages.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuMemEntry {
    /// Physical address of the backing page
    addr: u64,
    /// Length in bytes
    length: u32,
    /// Padding
    padding: u32,
}

/// SET_SCANOUT command structure.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuSetScanout {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Rectangle within the resource to display
    rect: VirtioGpuRect,
    /// Scanout index (display output)
    scanout_id: u32,
    /// Resource to display
    resource_id: u32,
}
299
/// TRANSFER_TO_HOST_2D command structure.
///
/// Copies pixel data from the attached guest backing store into the
/// host-side resource.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuTransferToHost2d {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Rectangle within the resource to transfer
    rect: VirtioGpuRect,
    /// Byte offset within the resource backing store
    offset: u64,
    /// Resource to transfer
    resource_id: u32,
    /// Padding
    padding: u32,
}

/// RESOURCE_FLUSH command structure.
///
/// Makes previously transferred pixels visible on the scanout(s) the
/// resource is bound to.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceFlush {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Rectangle to flush to display
    rect: VirtioGpuRect,
    /// Resource to flush
    resource_id: u32,
    /// Padding
    padding: u32,
}

/// RESOURCE_DETACH_BACKING command structure.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuResourceDetachBacking {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Resource to detach backing from
    resource_id: u32,
    /// Padding
    padding: u32,
}

/// GET_EDID command structure (only valid when VIRTIO_GPU_F_EDID was
/// negotiated).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuGetEdid {
    /// Command header
    hdr: VirtioGpuCtrlHdr,
    /// Scanout to query EDID for
    scanout: u32,
    /// Padding
    padding: u32,
}

/// GET_EDID response structure.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioGpuRespEdid {
    /// Response header
    hdr: VirtioGpuCtrlHdr,
    /// Size of valid EDID data
    size: u32,
    /// Padding
    padding: u32,
    /// Raw EDID data (up to 1024 bytes; only the first `size` are valid)
    edid: [u8; 1024],
}
367
368// ============================================================================
369// VirtIO Ring Structures (same layout as virtio_net.rs)
370// ============================================================================
371
/// VirtIO Ring Descriptor (one 16-byte entry of the descriptor table)
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqDesc {
    /// Guest physical address of the buffer
    addr: u64,
    /// Length of the buffer in bytes
    len: u32,
    /// Descriptor flags (NEXT, WRITE, INDIRECT)
    flags: u16,
    /// Index of the next descriptor in the chain
    next: u16,
}

/// VirtIO Ring Available (driver -> device)
///
/// Declared with the maximum 256 ring entries; queue sizes are capped at
/// 256 elsewhere so `ring[idx % size]` indexing stays in bounds.
#[repr(C)]
struct VirtqAvail {
    flags: u16,
    idx: u16,
    ring: [u16; 256],
    used_event: u16,
}

/// VirtIO Ring Used Element (written by the device on completion)
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtqUsedElem {
    /// Head descriptor index of the completed chain
    id: u32,
    /// Number of bytes the device wrote into the chain
    len: u32,
}

/// VirtIO Ring Used (device -> driver), fixed at the 256-entry maximum
/// like VirtqAvail.
#[repr(C)]
struct VirtqUsed {
    flags: u16,
    idx: u16,
    ring: [VirtqUsedElem; 256],
    avail_event: u16,
}

/// VirtIO Virtqueue -- manages a descriptor ring, available ring, and used
/// ring.
struct Virtqueue {
    /// Queue size (number of descriptors)
    size: u16,

    /// Descriptor table
    descriptors: &'static mut [VirtqDesc],

    /// Available ring
    avail: &'static mut VirtqAvail,

    /// Used ring
    used: &'static mut VirtqUsed,

    /// Free descriptor list head
    free_head: u16,

    /// Last seen used index
    last_used_idx: u16,

    /// Number of free descriptors
    num_free: u16,
}

impl Virtqueue {
    /// Create a new virtqueue from pre-allocated memory regions.
    ///
    /// Links every descriptor into a singly linked free list (the last
    /// entry's `next` wraps to 0; `num_free` prevents over-allocation) and
    /// zeroes both ring indices so the device sees empty rings.
    fn new(
        descriptors: &'static mut [VirtqDesc],
        avail: &'static mut VirtqAvail,
        used: &'static mut VirtqUsed,
        size: u16,
    ) -> Self {
        // Initialize the free list: 0 -> 1 -> ... -> size-1.
        for (i, desc) in descriptors.iter_mut().enumerate().take(size as usize) {
            let next = (i + 1) as u16;
            desc.next = if next < size { next } else { 0 };
        }

        // Reset ring state.
        avail.flags = 0;
        avail.idx = 0;
        used.flags = 0;
        used.idx = 0;

        Self {
            size,
            descriptors,
            avail,
            used,
            free_head: 0,
            last_used_idx: 0,
            num_free: size,
        }
    }

    /// Allocate a free descriptor, or `None` when the table is exhausted.
    fn alloc_desc(&mut self) -> Option<u16> {
        if self.num_free == 0 {
            return None;
        }

        let desc_idx = self.free_head;
        self.free_head = self.descriptors[desc_idx as usize].next;
        self.num_free -= 1;

        Some(desc_idx)
    }

    /// Return a descriptor to the head of the free list.
    fn free_desc(&mut self, desc_idx: u16) {
        self.descriptors[desc_idx as usize].next = self.free_head;
        self.free_head = desc_idx;
        self.num_free += 1;
    }

    /// Publish a descriptor chain head in the available ring and advance
    /// the index.
    fn add_to_avail(&mut self, desc_idx: u16) {
        let slot = self.avail.idx as usize % self.size as usize;
        self.avail.ring[slot] = desc_idx;

        // Barrier: the ring-slot store must be visible to the device before
        // the index store that publishes it.
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);

        let new_idx = self.avail.idx.wrapping_add(1);
        // SAFETY: `idx` is an in-bounds field of a live VirtqAvail. The
        // write is volatile because this memory is shared with the device,
        // so the compiler must not elide or reorder the store.
        unsafe { core::ptr::write_volatile(core::ptr::addr_of_mut!(self.avail.idx), new_idx) };
    }

    /// Check for completed buffers in the used ring.
    ///
    /// Returns the head descriptor index and the byte count the device
    /// reported, or `None` when nothing new has completed.
    fn get_used(&mut self) -> Option<(u16, u32)> {
        // SAFETY: `idx` is an in-bounds field of a live VirtqUsed. The read
        // is volatile because the device updates it asynchronously; a
        // non-volatile read could legally be cached by the compiler, making
        // a polling loop spin forever on a stale value.
        let device_idx = unsafe { core::ptr::read_volatile(core::ptr::addr_of!(self.used.idx)) };
        if self.last_used_idx == device_idx {
            return None;
        }

        // Barrier: read the ring element only after observing the index
        // update that published it.
        core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);

        let slot = self.last_used_idx as usize % self.size as usize;
        let used_elem = self.used.ring[slot];

        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Some((used_elem.id as u16, used_elem.len))
    }
}
512
/// DMA buffer region backing a virtqueue.
///
/// Stores the virtual address of frame-allocator-provided pages
/// used for the descriptor table, available ring, and used ring.
/// The region is never freed while the driver lives; `num_pages` is
/// recorded for eventual deallocation support.
struct VirtqueueDmaRegion {
    /// Virtual address of allocated pages
    virt_addr: usize,
    /// Number of 4KB pages allocated
    num_pages: usize,
}

/// Per-descriptor data buffer (single 4KB page or larger).
struct DataBuffer {
    /// Kernel virtual address of the buffer
    virt_addr: usize,
    /// DMA address handed to the device; currently equals the virtual
    /// address, assuming an identity/offset mapping (see allocate_virtqueue)
    phys_addr: u64,
}
529
530// ============================================================================
531// VirtIO GPU Driver State
532// ============================================================================
533
/// GPU device initialization state.
///
/// NOTE(review): in the code visible here only `Uninitialized` and `Ready`
/// are ever assigned; `Error` is presumably set by failure paths elsewhere.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum GpuDeviceState {
    /// Device not yet initialized
    Uninitialized,
    /// Device initialized and ready
    Ready,
    /// Device encountered an error
    Error,
}
544
/// VirtIO GPU Driver
///
/// Manages a single virtio-gpu device including its control and cursor
/// virtqueues, display configuration, and framebuffer resources.
pub struct VirtioGpuDriver {
    /// MMIO base address (virtual)
    mmio_base: usize,

    /// Negotiated device features
    features: u64,

    /// Device state
    state: GpuDeviceState,

    /// Control virtqueue (queue index 0)
    controlq: Option<Virtqueue>,
    /// Cursor virtqueue (queue index 1); None when the device exposes no
    /// cursor queue (it is optional)
    cursorq: Option<Virtqueue>,

    /// DMA region backing the control virtqueue rings
    ctrl_dma: Option<VirtqueueDmaRegion>,
    /// DMA region backing the cursor virtqueue rings
    cursor_dma: Option<VirtqueueDmaRegion>,

    /// Per-descriptor data buffers for the control queue
    ctrl_buffers: Vec<DataBuffer>,
    /// Per-descriptor data buffers for the cursor queue
    cursor_buffers: Vec<DataBuffer>,

    /// Detected display info for the first enabled scanout
    display_info: Option<VirtioGpuDisplayOne>,

    /// Next resource ID to allocate (starts at 1; 0 is used below as the
    /// "no framebuffer resource yet" sentinel)
    next_resource_id: u32,
    /// Resource ID bound to the primary framebuffer (0 = none yet)
    framebuffer_resource_id: u32,
    /// Backing pixel buffer for the framebuffer resource
    framebuffer_backing: Option<Vec<u32>>,

    /// Display width in pixels
    width: u32,
    /// Display height in pixels
    height: u32,
}
589
590impl VirtioGpuDriver {
591    /// Create and initialize a new VirtIO GPU driver at the given MMIO base.
592    pub fn new(mmio_base: usize) -> Result<Self, KernelError> {
593        let mut driver = Self {
594            mmio_base,
595            features: 0,
596            state: GpuDeviceState::Uninitialized,
597            controlq: None,
598            cursorq: None,
599            ctrl_dma: None,
600            cursor_dma: None,
601            ctrl_buffers: Vec::new(),
602            cursor_buffers: Vec::new(),
603            display_info: None,
604            next_resource_id: 1,
605            framebuffer_resource_id: 0,
606            framebuffer_backing: None,
607            width: 0,
608            height: 0,
609        };
610
611        driver.initialize()?;
612        Ok(driver)
613    }
614
615    // ---- MMIO register access ----
616
617    /// Read a 32-bit MMIO register.
618    fn read_reg(&self, offset: usize) -> u32 {
619        // SAFETY: Reading a VirtIO MMIO register at mmio_base + offset. The
620        // mmio_base is the device's memory-mapped I/O base from the device tree
621        // or PCI BAR. read_volatile prevents compiler reordering of hardware
622        // register accesses.
623        unsafe { core::ptr::read_volatile((self.mmio_base + offset) as *const u32) }
624    }
625
626    /// Write a 32-bit MMIO register.
627    fn write_reg(&self, offset: usize, value: u32) {
628        // SAFETY: Writing a VirtIO MMIO register. Same invariants as read_reg.
629        unsafe {
630            core::ptr::write_volatile((self.mmio_base + offset) as *mut u32, value);
631        }
632    }
633
634    // ---- Device initialization ----
635
    /// Initialize the VirtIO GPU device following the VirtIO 1.0+ sequence:
    ///
    /// 1. Reset device
    /// 2. Set ACKNOWLEDGE
    /// 3. Set DRIVER
    /// 4. Negotiate features
    /// 5. Set FEATURES_OK and verify
    /// 6. Setup virtqueues (control=0, cursor=1)
    /// 7. Set DRIVER_OK
    /// 8. Query display info and setup framebuffer
    ///
    /// Returns `KernelError::HardwareError` with a driver-specific code on
    /// validation failure: 0x01 = bad magic, 0x02 = wrong device ID,
    /// 0x03 = device rejected the negotiated features.
    fn initialize(&mut self) -> Result<(), KernelError> {
        // Validate magic number
        let magic = self.read_reg(VIRTIO_MMIO_MAGIC);
        if magic != 0x74726976 {
            // "virt" in little-endian
            crate::println!(
                "[VIRTIO-GPU] Invalid magic: {:#010x} (expected 0x74726976)",
                magic
            );
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x01,
            });
        }

        // Validate device ID (16 = GPU device)
        let device_id = self.read_reg(VIRTIO_MMIO_DEVICE_ID);
        if device_id != 16 {
            crate::println!(
                "[VIRTIO-GPU] Unexpected device ID: {} (expected 16)",
                device_id
            );
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x02,
            });
        }

        // NOTE(review): the MMIO version is only logged, never checked. A
        // version-1 (legacy) transport uses a different register layout
        // (QUEUE_PFN etc.) than the modern registers written below --
        // confirm the platform always exposes version >= 2.
        let version = self.read_reg(VIRTIO_MMIO_VERSION);
        crate::println!(
            "[VIRTIO-GPU] Found virtio-gpu device (MMIO version {})",
            version
        );

        // Step 1: Reset device
        self.write_reg(VIRTIO_MMIO_STATUS, 0);

        // Step 2: Set ACKNOWLEDGE status bit
        self.write_reg(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE);

        // Step 3: Set DRIVER status bit
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER,
        );

        // Step 4: Read and negotiate features (two 32-bit halves selected
        // via the FEATURES_SEL registers)
        self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
        let features_low = self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64;
        self.write_reg(VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
        let features_high = (self.read_reg(VIRTIO_MMIO_DEVICE_FEATURES) as u64) << 32;
        self.features = features_low | features_high;

        crate::println!("[VIRTIO-GPU] Device features: {:#018x}", self.features);

        if self.features & VIRTIO_GPU_F_VIRGL != 0 {
            crate::println!("[VIRTIO-GPU]   - VIRGL (3D) supported");
        }
        if self.features & VIRTIO_GPU_F_EDID != 0 {
            crate::println!("[VIRTIO-GPU]   - EDID supported");
        }

        // Accept EDID if available, but do NOT request VIRGL (we only do 2D)
        let driver_features = self.features & VIRTIO_GPU_F_EDID;
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
        self.write_reg(
            VIRTIO_MMIO_DRIVER_FEATURES,
            (driver_features & 0xFFFFFFFF) as u32,
        );
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
        self.write_reg(VIRTIO_MMIO_DRIVER_FEATURES, (driver_features >> 32) as u32);

        // Step 5: Set FEATURES_OK and verify the device kept the bit set
        // (a cleared bit means the device rejected our feature subset)
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK,
        );

        if (self.read_reg(VIRTIO_MMIO_STATUS) & VIRTIO_STATUS_FEATURES_OK) == 0 {
            crate::println!("[VIRTIO-GPU] Device did not accept features");
            return Err(KernelError::HardwareError {
                device: "virtio-gpu",
                code: 0x03,
            });
        }

        // Step 6: Set up virtqueues (control is mandatory, cursor optional)
        self.setup_control_queue()?;
        self.setup_cursor_queue()?;

        // Step 7: Set DRIVER_OK -- device is live
        self.write_reg(
            VIRTIO_MMIO_STATUS,
            VIRTIO_STATUS_ACKNOWLEDGE
                | VIRTIO_STATUS_DRIVER
                | VIRTIO_STATUS_FEATURES_OK
                | VIRTIO_STATUS_DRIVER_OK,
        );

        crate::println!("[VIRTIO-GPU] Device status: DRIVER_OK");

        // Step 8: Query display info; fall back to a default mode on failure
        match self.get_display_info() {
            Ok(display) => {
                self.width = display.rect.width;
                self.height = display.rect.height;
                self.display_info = Some(display);

                crate::println!(
                    "[VIRTIO-GPU] Display: {}x{} (enabled={})",
                    self.width,
                    self.height,
                    display.enabled
                );

                // Set up the primary framebuffer
                if let Err(e) = self.setup_framebuffer() {
                    crate::println!("[VIRTIO-GPU] Framebuffer setup failed: {:?}", e);
                    // Non-fatal: driver is still usable for manual operations
                }
            }
            Err(e) => {
                crate::println!("[VIRTIO-GPU] Display info query failed: {:?}", e);
                // Use default resolution
                self.width = 1024;
                self.height = 768;
            }
        }

        self.state = GpuDeviceState::Ready;
        Ok(())
    }
778
779    /// Set up the control virtqueue (queue index 0).
780    fn setup_control_queue(&mut self) -> Result<(), KernelError> {
781        self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 0);
782
783        let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
784        if max_size == 0 {
785            return Err(KernelError::HardwareError {
786                device: "virtio-gpu",
787                code: 0x10,
788            });
789        }
790        let queue_size = max_size.min(256);
791
792        crate::println!(
793            "[VIRTIO-GPU] Control queue: max={}, using={}",
794            max_size,
795            queue_size
796        );
797
798        let (vq, dma, buffers) = self.allocate_virtqueue(queue_size)?;
799
800        // Tell device about the queue addresses
801        let desc_phys = dma.virt_addr as u64;
802        let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
803        let used_offset = avail_offset + 6 + 2 * (queue_size as usize);
804        let avail_phys = desc_phys + avail_offset as u64;
805        let used_phys = desc_phys + used_offset as u64;
806
807        self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
808        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
809        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
810        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
811        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
812        self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
813        self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
814        self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);
815
816        self.controlq = Some(vq);
817        self.ctrl_dma = Some(dma);
818        self.ctrl_buffers = buffers;
819
820        Ok(())
821    }
822
823    /// Set up the cursor virtqueue (queue index 1).
824    fn setup_cursor_queue(&mut self) -> Result<(), KernelError> {
825        self.write_reg(VIRTIO_MMIO_QUEUE_SEL, 1);
826
827        let max_size = self.read_reg(VIRTIO_MMIO_QUEUE_NUM_MAX) as u16;
828        if max_size == 0 {
829            crate::println!("[VIRTIO-GPU] Cursor queue not available (max_size=0)");
830            return Ok(()); // Cursor queue is optional
831        }
832        let queue_size = max_size.min(256);
833
834        crate::println!(
835            "[VIRTIO-GPU] Cursor queue: max={}, using={}",
836            max_size,
837            queue_size
838        );
839
840        let (vq, dma, buffers) = self.allocate_virtqueue(queue_size)?;
841
842        let desc_phys = dma.virt_addr as u64;
843        let avail_offset = (queue_size as usize) * core::mem::size_of::<VirtqDesc>();
844        let used_offset = avail_offset + 6 + 2 * (queue_size as usize);
845        let avail_phys = desc_phys + avail_offset as u64;
846        let used_phys = desc_phys + used_offset as u64;
847
848        self.write_reg(VIRTIO_MMIO_QUEUE_NUM, queue_size as u32);
849        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_LOW, desc_phys as u32);
850        self.write_reg(VIRTIO_MMIO_QUEUE_DESC_HIGH, (desc_phys >> 32) as u32);
851        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_LOW, avail_phys as u32);
852        self.write_reg(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (avail_phys >> 32) as u32);
853        self.write_reg(VIRTIO_MMIO_QUEUE_USED_LOW, used_phys as u32);
854        self.write_reg(VIRTIO_MMIO_QUEUE_USED_HIGH, (used_phys >> 32) as u32);
855        self.write_reg(VIRTIO_MMIO_QUEUE_READY, 1);
856
857        self.cursorq = Some(vq);
858        self.cursor_dma = Some(dma);
859        self.cursor_buffers = buffers;
860
861        Ok(())
862    }
863
864    /// Allocate a virtqueue: ring memory + per-descriptor data buffers.
865    fn allocate_virtqueue(
866        &self,
867        queue_size: u16,
868    ) -> Result<(Virtqueue, VirtqueueDmaRegion, Vec<DataBuffer>), KernelError> {
869        let qs = queue_size as usize;
870
871        // Calculate total ring memory needed:
872        //   descriptors: qs * 16 bytes
873        //   avail ring: 2+2 + qs*2 + 2 = 6 + 2*qs bytes
874        //   used ring: 2+2 + qs*8 + 2 = 6 + 8*qs bytes
875        let desc_size = qs * core::mem::size_of::<VirtqDesc>();
876        let avail_size = 6 + 2 * qs;
877        let used_size = 6 + 8 * qs;
878        let total_ring_bytes = desc_size + avail_size + used_size;
879        let ring_pages = total_ring_bytes.div_ceil(4096);
880
881        // Allocate pages for the ring structures.
882        // In a full implementation this would use the frame allocator for
883        // physically contiguous DMA memory. For now we use a zeroed Vec
884        // that is leaked to obtain 'static references.
885        let ring_mem = alloc::vec![0u8; ring_pages * 4096];
886        let ring_ptr = ring_mem.as_ptr() as usize;
887        // Leak the memory so it lives for 'static (device holds references)
888        core::mem::forget(ring_mem);
889
890        // Carve out descriptor table, avail ring, used ring
891        let desc_ptr = ring_ptr as *mut VirtqDesc;
892        let avail_ptr = (ring_ptr + desc_size) as *mut VirtqAvail;
893        let used_ptr = (ring_ptr + desc_size + avail_size) as *mut VirtqUsed;
894
895        // SAFETY: These pointers come from a just-allocated, zeroed region that
896        // is large enough and properly aligned (Vec guarantees alignment for u8).
897        // The region is leaked so it outlives the driver.
898        let descriptors = unsafe { core::slice::from_raw_parts_mut(desc_ptr, qs) };
899        let avail = unsafe { &mut *avail_ptr };
900        let used = unsafe { &mut *used_ptr };
901
902        let vq = Virtqueue::new(descriptors, avail, used, queue_size);
903
904        // Allocate per-descriptor data buffers (one 4KB page each)
905        let mut data_buffers = Vec::with_capacity(qs);
906        for _i in 0..qs {
907            let buf = alloc::vec![0u8; 4096];
908            let buf_virt = buf.as_ptr() as usize;
909            let buf_phys = buf_virt as u64; // Approximate for identity/offset mapping
910            core::mem::forget(buf);
911            data_buffers.push(DataBuffer {
912                virt_addr: buf_virt,
913                phys_addr: buf_phys,
914            });
915        }
916
917        let dma = VirtqueueDmaRegion {
918            virt_addr: ring_ptr,
919            num_pages: ring_pages,
920        };
921
922        Ok((vq, dma, data_buffers))
923    }
924
925    // ---- Command submission ----
926
    /// Send a command via the control queue and wait for the response.
    ///
    /// The command is copied into a per-descriptor 4 KiB data buffer and
    /// submitted as a two-descriptor chain (device-readable request followed
    /// by a device-writable response). The driver then busy-polls the used
    /// ring until the device completes or the spin budget runs out.
    ///
    /// # Parameters
    /// - `cmd_bytes`: serialized request; silently truncated to 4096 bytes
    ///   (the data-buffer size).
    /// - `resp_len`: number of response bytes the device may write, also
    ///   capped at 4096.
    ///
    /// # Returns
    /// The response header (first bytes of the response buffer) plus the
    /// total byte count the device reported in the used ring, for status
    /// checking by the caller.
    ///
    /// # Errors
    /// - `HardwareError` (code 0x20) if the control queue is not set up.
    /// - `ResourceExhausted` if no descriptors or data buffers are free.
    /// - `Timeout` if the device never completes within the poll budget.
    fn send_command_raw(
        &mut self,
        cmd_bytes: &[u8],
        resp_len: usize,
    ) -> Result<(VirtioGpuCtrlHdr, usize), KernelError> {
        // Copy the MMIO base out first so the notify write below does not
        // conflict with the mutable borrow of `self.controlq`.
        let mmio = self.mmio_base;

        let controlq = self.controlq.as_mut().ok_or(KernelError::HardwareError {
            device: "virtio-gpu",
            code: 0x20,
        })?;

        // Allocate two descriptors: one for the request, one for the response
        let req_desc_idx = controlq
            .alloc_desc()
            .ok_or(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            })?;

        // If the second allocation fails, return the first descriptor to the
        // free list before propagating the error.
        let resp_desc_idx = controlq.alloc_desc().ok_or_else(|| {
            controlq.free_desc(req_desc_idx);
            KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            }
        })?;

        // Data buffers are indexed by descriptor id (one 4 KiB buffer each),
        // so both indices must be in range before we touch memory.
        // Copy command data into the request buffer
        if (req_desc_idx as usize) < self.ctrl_buffers.len()
            && (resp_desc_idx as usize) < self.ctrl_buffers.len()
        {
            let req_buf_virt = self.ctrl_buffers[req_desc_idx as usize].virt_addr;
            let req_buf_phys = self.ctrl_buffers[req_desc_idx as usize].phys_addr;
            let resp_buf_virt = self.ctrl_buffers[resp_desc_idx as usize].virt_addr;
            let resp_buf_phys = self.ctrl_buffers[resp_desc_idx as usize].phys_addr;

            // SAFETY: req_buf_virt points to a leaked 4096-byte allocation.
            // cmd_bytes.len() <= 4096 (checked by callers or bounded by protocol).
            // We hold &mut self so no concurrent access.
            let req_slice =
                unsafe { core::slice::from_raw_parts_mut(req_buf_virt as *mut u8, 4096) };
            let copy_len = cmd_bytes.len().min(4096);
            req_slice[..copy_len].copy_from_slice(&cmd_bytes[..copy_len]);

            // Zero the response buffer
            // SAFETY: resp_buf_virt is a leaked 4096-byte allocation, same
            // invariants as req_slice above.
            let resp_slice =
                unsafe { core::slice::from_raw_parts_mut(resp_buf_virt as *mut u8, 4096) };
            resp_slice[..resp_len.min(4096)].fill(0);

            // Set up request descriptor (device-readable, chained to response)
            controlq.descriptors[req_desc_idx as usize] = VirtqDesc {
                addr: req_buf_phys,
                len: copy_len as u32,
                flags: VIRTQ_DESC_F_NEXT,
                next: resp_desc_idx,
            };

            // Set up response descriptor (device-writable)
            controlq.descriptors[resp_desc_idx as usize] = VirtqDesc {
                addr: resp_buf_phys,
                len: resp_len.min(4096) as u32,
                flags: VIRTQ_DESC_F_WRITE,
                next: 0,
            };

            // Add the head of the chain to the available ring
            controlq.add_to_avail(req_desc_idx);

            // Kick the device (control queue = index 0)
            // NOTE(review): no explicit memory barrier is visible between
            // populating the avail ring and this notify write — presumably
            // add_to_avail provides the ordering; confirm against the
            // VirtIO split-virtqueue requirements.
            // SAFETY: Writing to VirtIO queue notify register.
            unsafe {
                core::ptr::write_volatile((mmio + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32, 0);
            }

            // Poll for completion (with timeout)
            // NOTE(review): this is a spin-count budget, not wall-clock time;
            // the duration_ms reported on timeout is nominal.
            let mut timeout = 1_000_000u32;
            loop {
                if let Some((_used_id, used_len)) = controlq.get_used() {
                    // NOTE(review): _used_id is not compared against
                    // req_desc_idx — assumes strictly serialized, one
                    // in-flight command at a time; verify.
                    // Read response header from the response buffer
                    // SAFETY: resp_buf_virt is a valid, leaked 4096-byte buffer.
                    let resp_hdr = unsafe { *(resp_buf_virt as *const VirtioGpuCtrlHdr) };

                    // Free both descriptors
                    controlq.free_desc(req_desc_idx);
                    controlq.free_desc(resp_desc_idx);

                    return Ok((resp_hdr, used_len as usize));
                }

                timeout -= 1;
                if timeout == 0 {
                    // Give the descriptors back even though the device may
                    // still reference them; matches existing driver policy.
                    controlq.free_desc(req_desc_idx);
                    controlq.free_desc(resp_desc_idx);
                    return Err(KernelError::Timeout {
                        operation: "virtio_gpu_command",
                        duration_ms: 1000,
                    });
                }

                core::hint::spin_loop();
            }
        } else {
            // Descriptor index has no matching data buffer: release both
            // descriptors and report exhaustion.
            controlq.free_desc(req_desc_idx);
            controlq.free_desc(resp_desc_idx);
            Err(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_buffers",
            })
        }
    }
1045
1046    /// Send a typed command and expect a simple OK_NODATA response.
1047    fn send_simple_command<T: Sized>(&mut self, cmd: &T) -> Result<(), KernelError> {
1048        // SAFETY: Reinterpreting a #[repr(C)] struct as a byte slice for
1049        // serialization. The struct is Sized and lives on the stack.
1050        let cmd_bytes = unsafe {
1051            core::slice::from_raw_parts(cmd as *const T as *const u8, core::mem::size_of::<T>())
1052        };
1053
1054        let (resp_hdr, _len) =
1055            self.send_command_raw(cmd_bytes, core::mem::size_of::<VirtioGpuCtrlHdr>())?;
1056
1057        if resp_hdr.hdr_type != VIRTIO_GPU_RESP_OK_NODATA {
1058            return Err(Self::response_to_error(resp_hdr.hdr_type));
1059        }
1060
1061        Ok(())
1062    }
1063
1064    /// Convert a VirtIO GPU response type to a KernelError.
1065    fn response_to_error(resp_type: u32) -> KernelError {
1066        match resp_type {
1067            VIRTIO_GPU_RESP_ERR_UNSPEC => KernelError::HardwareError {
1068                device: "virtio-gpu",
1069                code: 0x1200,
1070            },
1071            VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY => KernelError::OutOfMemory {
1072                requested: 0,
1073                available: 0,
1074            },
1075            VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID => KernelError::InvalidArgument {
1076                name: "scanout_id",
1077                value: "invalid",
1078            },
1079            VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID => KernelError::InvalidArgument {
1080                name: "resource_id",
1081                value: "invalid",
1082            },
1083            VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID => KernelError::InvalidArgument {
1084                name: "context_id",
1085                value: "invalid",
1086            },
1087            VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER => KernelError::InvalidArgument {
1088                name: "parameter",
1089                value: "invalid",
1090            },
1091            _ => KernelError::HardwareError {
1092                device: "virtio-gpu",
1093                code: resp_type,
1094            },
1095        }
1096    }
1097
1098    // ---- GPU commands ----
1099
1100    /// Query display information from the device.
1101    ///
1102    /// Returns the first enabled display mode (scanout 0 is preferred).
1103    pub fn get_display_info(&mut self) -> Result<VirtioGpuDisplayOne, KernelError> {
1104        let cmd = VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
1105        // SAFETY: Reinterpreting a #[repr(C)] VirtioGpuCtrlHdr as a byte slice.
1106        let cmd_bytes = unsafe {
1107            core::slice::from_raw_parts(
1108                &cmd as *const VirtioGpuCtrlHdr as *const u8,
1109                core::mem::size_of::<VirtioGpuCtrlHdr>(),
1110            )
1111        };
1112
1113        let resp_size = core::mem::size_of::<VirtioGpuRespDisplayInfo>();
1114        let (resp_hdr, _len) = self.send_command_raw(cmd_bytes, resp_size)?;
1115
1116        if resp_hdr.hdr_type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO {
1117            return Err(Self::response_to_error(resp_hdr.hdr_type));
1118        }
1119
1120        // Read the full response from the response buffer.
1121        // We need to re-read the response descriptor buffer to get the display
1122        // info payload.
1123        //
1124        // The response was written to the resp_desc buffer (second descriptor).
1125        // Since send_command_raw already freed the descriptors, we need to read
1126        // from the buffer that was used. We re-read it by examining the second
1127        // data buffer that was just used.
1128        //
1129        // A simpler approach: peek at the response buffer before freeing.
1130        // Since we already got resp_hdr, we need to read the full struct.
1131        // The response was in ctrl_buffers[resp_desc_idx]. However,
1132        // send_command_raw already freed the descriptors. We need a different
1133        // approach.
1134        //
1135        // Refactored: use send_command_raw_with_response for large responses.
1136        //
1137        // For now, re-send the command and capture the full response.
1138        self.get_display_info_internal()
1139    }
1140
    /// Internal implementation of get_display_info that captures the full
    /// response buffer.
    ///
    /// Mirrors `send_command_raw`, but reads the complete
    /// `VirtioGpuRespDisplayInfo` payload out of the response buffer while
    /// the descriptors are still assigned (the generic path only returns
    /// the header).
    ///
    /// NOTE(review): this duplicates the descriptor/submit/poll logic of
    /// `send_command_raw`; a shared helper exposing the response buffer
    /// would remove the duplication.
    fn get_display_info_internal(&mut self) -> Result<VirtioGpuDisplayOne, KernelError> {
        let cmd = VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        // SAFETY: Reinterpreting a #[repr(C)] VirtioGpuCtrlHdr as a byte slice.
        let cmd_bytes = unsafe {
            core::slice::from_raw_parts(
                &cmd as *const VirtioGpuCtrlHdr as *const u8,
                core::mem::size_of::<VirtioGpuCtrlHdr>(),
            )
        };

        let resp_size = core::mem::size_of::<VirtioGpuRespDisplayInfo>();
        // Copy the MMIO base before mutably borrowing the control queue.
        let mmio = self.mmio_base;

        let controlq = self.controlq.as_mut().ok_or(KernelError::HardwareError {
            device: "virtio-gpu",
            code: 0x20,
        })?;

        let req_desc_idx = controlq
            .alloc_desc()
            .ok_or(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            })?;

        // Release the first descriptor if the second allocation fails.
        let resp_desc_idx = controlq.alloc_desc().ok_or_else(|| {
            controlq.free_desc(req_desc_idx);
            KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_descriptors",
            }
        })?;

        // Data buffers are indexed by descriptor id; both must be in range.
        if (req_desc_idx as usize) >= self.ctrl_buffers.len()
            || (resp_desc_idx as usize) >= self.ctrl_buffers.len()
        {
            controlq.free_desc(req_desc_idx);
            controlq.free_desc(resp_desc_idx);
            return Err(KernelError::ResourceExhausted {
                resource: "virtio_gpu_ctrl_buffers",
            });
        }

        let req_buf_virt = self.ctrl_buffers[req_desc_idx as usize].virt_addr;
        let req_buf_phys = self.ctrl_buffers[req_desc_idx as usize].phys_addr;
        let resp_buf_virt = self.ctrl_buffers[resp_desc_idx as usize].virt_addr;
        let resp_buf_phys = self.ctrl_buffers[resp_desc_idx as usize].phys_addr;

        // Copy command
        // SAFETY: req_buf_virt is a leaked 4096-byte DMA buffer. We hold &mut self.
        let req_slice = unsafe { core::slice::from_raw_parts_mut(req_buf_virt as *mut u8, 4096) };
        let copy_len = cmd_bytes.len().min(4096);
        req_slice[..copy_len].copy_from_slice(&cmd_bytes[..copy_len]);

        // Zero response
        // SAFETY: resp_buf_virt is a leaked 4096-byte DMA buffer. We hold &mut self.
        let resp_slice = unsafe { core::slice::from_raw_parts_mut(resp_buf_virt as *mut u8, 4096) };
        resp_slice[..resp_size.min(4096)].fill(0);

        // Set up descriptors: device-readable request chained to a
        // device-writable response.
        controlq.descriptors[req_desc_idx as usize] = VirtqDesc {
            addr: req_buf_phys,
            len: copy_len as u32,
            flags: VIRTQ_DESC_F_NEXT,
            next: resp_desc_idx,
        };
        controlq.descriptors[resp_desc_idx as usize] = VirtqDesc {
            addr: resp_buf_phys,
            len: resp_size.min(4096) as u32,
            flags: VIRTQ_DESC_F_WRITE,
            next: 0,
        };

        controlq.add_to_avail(req_desc_idx);

        // Kick
        // SAFETY: Writing to VirtIO queue notify register at mapped MMIO address.
        unsafe {
            core::ptr::write_volatile((mmio + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32, 0);
        }

        // Poll for completion
        // NOTE(review): spin-count budget, not wall-clock time; the
        // duration_ms reported on timeout is nominal.
        let mut timeout = 1_000_000u32;
        loop {
            if let Some((_used_id, _used_len)) = controlq.get_used() {
                // Read the full response
                // SAFETY: resp_buf_virt is a valid leaked 4096-byte buffer,
                // and VirtioGpuRespDisplayInfo fits within it.
                let resp = unsafe { *(resp_buf_virt as *const VirtioGpuRespDisplayInfo) };

                controlq.free_desc(req_desc_idx);
                controlq.free_desc(resp_desc_idx);

                if resp.hdr.hdr_type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO {
                    return Err(Self::response_to_error(resp.hdr.hdr_type));
                }

                // Find the first enabled display
                for i in 0..VIRTIO_GPU_MAX_SCANOUTS {
                    if resp.pmodes[i].enabled != 0 {
                        return Ok(resp.pmodes[i]);
                    }
                }

                // No enabled display found -- use scanout 0 with defaults
                if resp.pmodes[0].rect.width > 0 && resp.pmodes[0].rect.height > 0 {
                    return Ok(resp.pmodes[0]);
                }

                // Fallback defaults
                return Ok(VirtioGpuDisplayOne {
                    rect: VirtioGpuRect {
                        x: 0,
                        y: 0,
                        width: 1024,
                        height: 768,
                    },
                    enabled: 1,
                    flags: 0,
                });
            }

            timeout -= 1;
            if timeout == 0 {
                controlq.free_desc(req_desc_idx);
                controlq.free_desc(resp_desc_idx);
                return Err(KernelError::Timeout {
                    operation: "virtio_gpu_get_display_info",
                    duration_ms: 1000,
                });
            }

            core::hint::spin_loop();
        }
    }
1276
1277    /// Create a 2D resource on the host.
1278    pub fn create_resource_2d(
1279        &mut self,
1280        resource_id: u32,
1281        format: u32,
1282        width: u32,
1283        height: u32,
1284    ) -> Result<(), KernelError> {
1285        let cmd = VirtioGpuResourceCreate2d {
1286            hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
1287            resource_id,
1288            format,
1289            width,
1290            height,
1291        };
1292
1293        self.send_simple_command(&cmd)?;
1294
1295        crate::println!(
1296            "[VIRTIO-GPU] Created 2D resource {} ({}x{}, format={})",
1297            resource_id,
1298            width,
1299            height,
1300            format
1301        );
1302
1303        Ok(())
1304    }
1305
1306    /// Destroy a 2D resource on the host.
1307    pub fn resource_unref(&mut self, resource_id: u32) -> Result<(), KernelError> {
1308        let cmd = VirtioGpuResourceUnref {
1309            hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_UNREF),
1310            resource_id,
1311            padding: 0,
1312        };
1313
1314        self.send_simple_command(&cmd)?;
1315
1316        crate::println!("[VIRTIO-GPU] Destroyed resource {}", resource_id);
1317        Ok(())
1318    }
1319
1320    /// Attach backing store (guest memory) to a resource.
1321    ///
1322    /// The command includes a VirtioGpuMemEntry that describes the physical
1323    /// address and length of the backing memory.
1324    pub fn attach_backing(
1325        &mut self,
1326        resource_id: u32,
1327        addr: u64,
1328        length: u32,
1329    ) -> Result<(), KernelError> {
1330        // Build the combined command: attach_backing header + one mem entry
1331        // We need to send them as a single contiguous command buffer.
1332        #[repr(C)]
1333        #[derive(Clone, Copy)]
1334        struct AttachBackingWithEntry {
1335            cmd: VirtioGpuResourceAttachBacking,
1336            entry: VirtioGpuMemEntry,
1337        }
1338
1339        let combined = AttachBackingWithEntry {
1340            cmd: VirtioGpuResourceAttachBacking {
1341                hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
1342                resource_id,
1343                nr_entries: 1,
1344            },
1345            entry: VirtioGpuMemEntry {
1346                addr,
1347                length,
1348                padding: 0,
1349            },
1350        };
1351
1352        // SAFETY: Reinterpreting a #[repr(C)] struct as a byte slice for
1353        // serialization to the VirtIO control queue.
1354        let cmd_bytes = unsafe {
1355            core::slice::from_raw_parts(
1356                &combined as *const AttachBackingWithEntry as *const u8,
1357                core::mem::size_of::<AttachBackingWithEntry>(),
1358            )
1359        };
1360
1361        let (resp_hdr, _len) =
1362            self.send_command_raw(cmd_bytes, core::mem::size_of::<VirtioGpuCtrlHdr>())?;
1363
1364        if resp_hdr.hdr_type != VIRTIO_GPU_RESP_OK_NODATA {
1365            return Err(Self::response_to_error(resp_hdr.hdr_type));
1366        }
1367
1368        crate::println!(
1369            "[VIRTIO-GPU] Attached backing for resource {} (addr={:#x}, len={})",
1370            resource_id,
1371            addr,
1372            length
1373        );
1374
1375        Ok(())
1376    }
1377
1378    /// Detach backing store from a resource.
1379    pub fn detach_backing(&mut self, resource_id: u32) -> Result<(), KernelError> {
1380        let cmd = VirtioGpuResourceDetachBacking {
1381            hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
1382            resource_id,
1383            padding: 0,
1384        };
1385
1386        self.send_simple_command(&cmd)
1387    }
1388
1389    /// Set scanout: bind a resource (or region of it) to a display output.
1390    pub fn set_scanout(
1391        &mut self,
1392        scanout_id: u32,
1393        resource_id: u32,
1394        rect: VirtioGpuRect,
1395    ) -> Result<(), KernelError> {
1396        let cmd = VirtioGpuSetScanout {
1397            hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_SET_SCANOUT),
1398            rect,
1399            scanout_id,
1400            resource_id,
1401        };
1402
1403        self.send_simple_command(&cmd)?;
1404
1405        crate::println!(
1406            "[VIRTIO-GPU] Set scanout {}: resource {} ({}x{}+{}+{})",
1407            scanout_id,
1408            resource_id,
1409            rect.width,
1410            rect.height,
1411            rect.x,
1412            rect.y
1413        );
1414
1415        Ok(())
1416    }
1417
1418    /// Transfer data from guest backing store to host resource.
1419    pub fn transfer_to_host_2d(
1420        &mut self,
1421        resource_id: u32,
1422        rect: VirtioGpuRect,
1423    ) -> Result<(), KernelError> {
1424        let cmd = VirtioGpuTransferToHost2d {
1425            hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
1426            rect,
1427            offset: 0,
1428            resource_id,
1429            padding: 0,
1430        };
1431
1432        self.send_simple_command(&cmd)
1433    }
1434
1435    /// Flush a resource region to the display.
1436    pub fn resource_flush(
1437        &mut self,
1438        resource_id: u32,
1439        rect: VirtioGpuRect,
1440    ) -> Result<(), KernelError> {
1441        let cmd = VirtioGpuResourceFlush {
1442            hdr: VirtioGpuCtrlHdr::new(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
1443            rect,
1444            resource_id,
1445            padding: 0,
1446        };
1447
1448        self.send_simple_command(&cmd)
1449    }
1450
1451    // ---- Framebuffer management ----
1452
1453    /// Set up the primary framebuffer: create a 2D resource, attach a
1454    /// backing pixel buffer, and bind it to scanout 0.
1455    pub fn setup_framebuffer(&mut self) -> Result<(), KernelError> {
1456        if self.width == 0 || self.height == 0 {
1457            return Err(KernelError::InvalidState {
1458                expected: "display_configured",
1459                actual: "no_display",
1460            });
1461        }
1462
1463        let resource_id = self.next_resource_id;
1464        self.next_resource_id += 1;
1465
1466        // Create a 2D resource for the framebuffer
1467        self.create_resource_2d(resource_id, FORMAT_B8G8R8X8_UNORM, self.width, self.height)?;
1468
1469        // Allocate backing pixel buffer
1470        let pixel_count = (self.width * self.height) as usize;
1471        let mut backing = alloc::vec![0u32; pixel_count];
1472
1473        // Fill with a dark blue gradient as initial content
1474        for y in 0..self.height {
1475            for x in 0..self.width {
1476                let idx = (y * self.width + x) as usize;
1477                // BGRX format: blue gradient from dark to mid
1478                let blue = 32 + (y * 64 / self.height);
1479                let green = 16 + (y * 32 / self.height);
1480                backing[idx] = (blue << 16) | (green << 8) | 0x10;
1481            }
1482        }
1483
1484        // Get the physical address of the backing buffer
1485        let backing_addr = backing.as_ptr() as u64;
1486        let backing_len = (pixel_count * 4) as u32;
1487
1488        // Attach the backing store
1489        self.attach_backing(resource_id, backing_addr, backing_len)?;
1490
1491        // Bind to scanout 0
1492        let scanout_rect = VirtioGpuRect::new(0, 0, self.width, self.height);
1493        self.set_scanout(0, resource_id, scanout_rect)?;
1494
1495        // Transfer initial content to host
1496        self.transfer_to_host_2d(resource_id, scanout_rect)?;
1497
1498        // Flush to display
1499        self.resource_flush(resource_id, scanout_rect)?;
1500
1501        self.framebuffer_resource_id = resource_id;
1502        self.framebuffer_backing = Some(backing);
1503
1504        crate::println!(
1505            "[VIRTIO-GPU] Framebuffer ready: {}x{} (resource {})",
1506            self.width,
1507            self.height,
1508            resource_id
1509        );
1510
1511        Ok(())
1512    }
1513
1514    /// Flush the framebuffer to the display.
1515    ///
1516    /// Transfers the entire backing buffer to the host and triggers a display
1517    /// refresh. Call this after modifying the framebuffer pixels.
1518    pub fn flush_framebuffer(&mut self) -> Result<(), KernelError> {
1519        if self.framebuffer_resource_id == 0 {
1520            return Err(KernelError::InvalidState {
1521                expected: "framebuffer_setup",
1522                actual: "no_framebuffer",
1523            });
1524        }
1525
1526        let rect = VirtioGpuRect::new(0, 0, self.width, self.height);
1527        self.transfer_to_host_2d(self.framebuffer_resource_id, rect)?;
1528        self.resource_flush(self.framebuffer_resource_id, rect)?;
1529
1530        Ok(())
1531    }
1532
1533    /// Flush a sub-region of the framebuffer.
1534    ///
1535    /// More efficient than flushing the entire framebuffer when only a small
1536    /// area has changed.
1537    pub fn flush_region(&mut self, rect: VirtioGpuRect) -> Result<(), KernelError> {
1538        if self.framebuffer_resource_id == 0 {
1539            return Err(KernelError::InvalidState {
1540                expected: "framebuffer_setup",
1541                actual: "no_framebuffer",
1542            });
1543        }
1544
1545        self.transfer_to_host_2d(self.framebuffer_resource_id, rect)?;
1546        self.resource_flush(self.framebuffer_resource_id, rect)?;
1547
1548        Ok(())
1549    }
1550
    /// Get mutable access to the framebuffer pixel buffer.
    ///
    /// Returns a slice of BGRX pixels, or `None` if `setup_framebuffer()`
    /// has not run. Modify the pixels, then call `flush_framebuffer()` or
    /// `flush_region()` to push changes to the display.
    pub fn get_framebuffer_mut(&mut self) -> Option<&mut [u32]> {
        self.framebuffer_backing.as_deref_mut()
    }
1559
    /// Get read-only access to the framebuffer pixel buffer, or `None` if
    /// `setup_framebuffer()` has not run.
    pub fn get_framebuffer(&self) -> Option<&[u32]> {
        self.framebuffer_backing.as_deref()
    }
1564
    /// Display width in pixels (0 until a display mode is configured).
    pub fn width(&self) -> u32 {
        self.width
    }
1569
    /// Display height in pixels (0 until a display mode is configured).
    pub fn height(&self) -> u32 {
        self.height
    }
1574
1575    /// Check if the driver is initialized and ready.
1576    pub fn is_ready(&self) -> bool {
1577        self.state == GpuDeviceState::Ready
1578    }
1579
1580    /// Check if EDID is supported.
1581    pub fn supports_edid(&self) -> bool {
1582        self.features & VIRTIO_GPU_F_EDID != 0
1583    }
1584
1585    /// Check if 3D (VIRGL) is supported.
1586    pub fn supports_virgl(&self) -> bool {
1587        self.features & VIRTIO_GPU_F_VIRGL != 0
1588    }
1589
    /// Resource id of the primary framebuffer (0 until `setup_framebuffer()`
    /// succeeds).
    pub fn framebuffer_resource_id(&self) -> u32 {
        self.framebuffer_resource_id
    }
1594
1595    /// Allocate a new resource ID.
1596    pub fn alloc_resource_id(&mut self) -> u32 {
1597        let id = self.next_resource_id;
1598        self.next_resource_id += 1;
1599        id
1600    }
1601
1602    /// Set a pixel in the framebuffer (BGRX format).
1603    ///
1604    /// Does NOT flush automatically -- call `flush_framebuffer()` after
1605    /// modifying pixels.
1606    pub fn set_pixel(&mut self, x: u32, y: u32, color: u32) -> Result<(), KernelError> {
1607        if x >= self.width || y >= self.height {
1608            return Err(KernelError::InvalidArgument {
1609                name: "coordinates",
1610                value: "out_of_bounds",
1611            });
1612        }
1613
1614        if let Some(ref mut fb) = self.framebuffer_backing {
1615            let idx = (y * self.width + x) as usize;
1616            if idx < fb.len() {
1617                fb[idx] = color;
1618            }
1619        }
1620
1621        Ok(())
1622    }
1623
1624    /// Fill a rectangle in the framebuffer with a solid color.
1625    ///
1626    /// Does NOT flush automatically.
1627    pub fn fill_rect(
1628        &mut self,
1629        x: u32,
1630        y: u32,
1631        w: u32,
1632        h: u32,
1633        color: u32,
1634    ) -> Result<(), KernelError> {
1635        if let Some(ref mut fb) = self.framebuffer_backing {
1636            let width = self.width;
1637            let height = self.height;
1638
1639            for dy in 0..h {
1640                let row_y = y + dy;
1641                if row_y >= height {
1642                    break;
1643                }
1644                for dx in 0..w {
1645                    let col_x = x + dx;
1646                    if col_x >= width {
1647                        break;
1648                    }
1649                    let idx = (row_y * width + col_x) as usize;
1650                    if idx < fb.len() {
1651                        fb[idx] = color;
1652                    }
1653                }
1654            }
1655        }
1656
1657        Ok(())
1658    }
1659
1660    /// Blit a buffer of pixels into the framebuffer.
1661    ///
1662    /// The buffer must contain `w * h` BGRX pixels. Does NOT flush
1663    /// automatically.
1664    pub fn blit(
1665        &mut self,
1666        buffer: &[u32],
1667        x: u32,
1668        y: u32,
1669        w: u32,
1670        h: u32,
1671    ) -> Result<(), KernelError> {
1672        if let Some(ref mut fb) = self.framebuffer_backing {
1673            let fb_width = self.width;
1674            let fb_height = self.height;
1675
1676            for dy in 0..h {
1677                let row_y = y + dy;
1678                if row_y >= fb_height {
1679                    break;
1680                }
1681                for dx in 0..w {
1682                    let col_x = x + dx;
1683                    if col_x >= fb_width {
1684                        break;
1685                    }
1686                    let src_idx = (dy * w + dx) as usize;
1687                    let dst_idx = (row_y * fb_width + col_x) as usize;
1688                    if src_idx < buffer.len() && dst_idx < fb.len() {
1689                        fb[dst_idx] = buffer[src_idx];
1690                    }
1691                }
1692            }
1693        }
1694
1695        Ok(())
1696    }
1697
1698    /// Clear the framebuffer with a solid color.
1699    ///
1700    /// Does NOT flush automatically.
1701    pub fn clear(&mut self, color: u32) {
1702        if let Some(ref mut fb) = self.framebuffer_backing {
1703            fb.fill(color);
1704        }
1705    }
1706}
1707
1708// ============================================================================
1709// PCI Discovery (x86_64 only)
1710// ============================================================================
1711
/// Probe PCI bus for a VirtIO GPU device (vendor 0x1AF4, device 0x1050).
///
/// Returns the MMIO base address (virtual) if found.
///
/// Returns `None` when the PCI subsystem has not been initialized, when
/// no matching device is present, when the matched device exposes no
/// memory BAR, or when the physical-to-virtual translation fails.
#[cfg(target_arch = "x86_64")]
pub fn probe_pci() -> Option<usize> {
    // PCI enumeration must have run first; bail out quietly otherwise.
    if !crate::drivers::pci::is_pci_initialized() {
        return None;
    }

    let bus = crate::drivers::pci::get_pci_bus().lock();

    // VirtIO GPU: vendor 0x1AF4 (Red Hat), device 0x1050 (virtio 1.0 GPU)
    let devices = bus.find_devices_by_id(0x1AF4, 0x1050);

    // Only the first matching device is used; additional GPUs are ignored.
    if let Some(dev) = devices.first() {
        crate::println!(
            "[VIRTIO-GPU] Found PCI device {:04x}:{:04x} at {:02x}:{:02x}.{}",
            dev.vendor_id,
            dev.device_id,
            dev.location.bus,
            dev.location.device,
            dev.location.function
        );

        // Get BAR0 MMIO address
        if let Some(bar) = dev.bars.first() {
            if let Some(phys_addr) = bar.get_memory_address() {
                crate::println!("[VIRTIO-GPU] BAR0 physical address: {:#x}", phys_addr);
                // Convert physical to virtual address
                // NOTE(review): phys_to_virt may return None, in which case
                // the fall-through below still scans for 0x1040 devices.
                return crate::arch::x86_64::msr::phys_to_virt(phys_addr as usize);
            }
        }

        // Modern device found but unusable (no memory BAR): fall through
        // to the secondary scan below rather than failing immediately.
        crate::println!("[VIRTIO-GPU] No usable BAR found");
    }

    // Also try the transitional device ID (0x1040 + device_type where
    // gpu device_type = 16)
    // NOTE(review): 0x1040 + 16 = 0x1050, which is already scanned above.
    // Device ID 0x1040 itself corresponds to device_type 0 (reserved per
    // the VirtIO spec; transitional IDs live in 0x1000..=0x103F), so this
    // lookup likely never matches real hardware — confirm intent.
    let legacy_devices = bus.find_devices_by_id(0x1AF4, 0x1040);
    for dev in &legacy_devices {
        // For transitional devices, subsystem device ID indicates the type
        // (the subsystem ID is not checked here, so any match is accepted).
        crate::println!(
            "[VIRTIO-GPU] Found transitional VirtIO PCI device {:04x}:{:04x}",
            dev.vendor_id,
            dev.device_id
        );

        if let Some(bar) = dev.bars.first() {
            if let Some(phys_addr) = bar.get_memory_address() {
                return crate::arch::x86_64::msr::phys_to_virt(phys_addr as usize);
            }
        }
    }

    None
}
1768
/// Stub for non-x86_64 architectures.
///
/// PCI-based discovery is only implemented for x86_64; other targets
/// always report that no device was found.
#[cfg(not(target_arch = "x86_64"))]
pub fn probe_pci() -> Option<usize> {
    None
}
1774
1775/// Probe PCI bus for display-class devices and return a summary.
1776#[cfg(target_arch = "x86_64")]
1777pub fn enumerate_gpu_devices() -> Vec<(u16, u16, u8, u8)> {
1778    let mut result = Vec::new();
1779
1780    if !crate::drivers::pci::is_pci_initialized() {
1781        return result;
1782    }
1783
1784    let bus = crate::drivers::pci::get_pci_bus().lock();
1785    let display_devices = bus.find_devices_by_class(crate::drivers::pci::class_codes::DISPLAY);
1786
1787    for dev in &display_devices {
1788        result.push((dev.vendor_id, dev.device_id, dev.class_code, dev.subclass));
1789    }
1790
1791    result
1792}
1793
/// Stub for non-x86_64 architectures.
///
/// Returns an empty list, since PCI display enumeration is only wired
/// up on x86_64.
#[cfg(not(target_arch = "x86_64"))]
pub fn enumerate_gpu_devices() -> Vec<(u16, u16, u8, u8)> {
    Vec::new()
}
1799
1800// ============================================================================
1801// Module-level state and initialization
1802// ============================================================================
1803
/// Global VirtIO GPU driver instance.
///
/// `None` until `init()` successfully probes and initializes a device.
/// Guarded by a spinlock; the accessor functions below take the lock for
/// the duration of each call.
static VIRTIO_GPU: spin::Mutex<Option<VirtioGpuDriver>> = spin::Mutex::new(None);
1806
1807/// Initialize the VirtIO GPU driver.
1808///
1809/// Probes PCI for a virtio-gpu device. If found, initializes the driver,
1810/// queries display info, and sets up a framebuffer.
1811pub fn init() -> Result<(), KernelError> {
1812    crate::println!("[VIRTIO-GPU] Probing for virtio-gpu device...");
1813
1814    // Try PCI discovery
1815    if let Some(mmio_base) = probe_pci() {
1816        crate::println!("[VIRTIO-GPU] MMIO base: {:#x}", mmio_base);
1817
1818        match VirtioGpuDriver::new(mmio_base) {
1819            Ok(driver) => {
1820                crate::println!(
1821                    "[VIRTIO-GPU] Driver initialized: {}x{} (resource {})",
1822                    driver.width(),
1823                    driver.height(),
1824                    driver.framebuffer_resource_id()
1825                );
1826                *VIRTIO_GPU.lock() = Some(driver);
1827                return Ok(());
1828            }
1829            Err(e) => {
1830                crate::println!("[VIRTIO-GPU] Init failed: {:?}", e);
1831                return Err(e);
1832            }
1833        }
1834    }
1835
1836    crate::println!("[VIRTIO-GPU] No virtio-gpu device found");
1837    Ok(())
1838}
1839
1840/// Execute a closure with the VirtIO GPU driver (mutable access).
1841pub fn with_driver<R, F: FnOnce(&mut VirtioGpuDriver) -> R>(f: F) -> Option<R> {
1842    VIRTIO_GPU.lock().as_mut().map(f)
1843}
1844
1845/// Check if a VirtIO GPU driver is available and initialized.
1846pub fn is_available() -> bool {
1847    VIRTIO_GPU.lock().is_some()
1848}
1849
1850/// Flush the VirtIO GPU framebuffer to the display.
1851///
1852/// Convenience function that acquires the driver lock and flushes.
1853pub fn flush_framebuffer() -> Result<(), KernelError> {
1854    if let Some(ref mut driver) = *VIRTIO_GPU.lock() {
1855        driver.flush_framebuffer()
1856    } else {
1857        Err(KernelError::InvalidState {
1858            expected: "virtio_gpu_initialized",
1859            actual: "no_driver",
1860        })
1861    }
1862}
1863
1864/// Get the display dimensions (width, height) if a VirtIO GPU is available.
1865pub fn get_display_size() -> Option<(u32, u32)> {
1866    VIRTIO_GPU.lock().as_ref().map(|d| (d.width(), d.height()))
1867}
1868
#[cfg(test)]
mod tests {
    use super::*;

    /// Protocol command/response numbering must match the VirtIO GPU spec.
    #[test]
    fn test_gpu_constants() {
        assert_eq!(VIRTIO_GPU_CMD_GET_DISPLAY_INFO, 0x100);
        assert_eq!(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, 0x101);
        assert_eq!(VIRTIO_GPU_RESP_OK_NODATA, 0x1100);
        assert_eq!(VIRTIO_GPU_RESP_ERR_UNSPEC, 0x1200);
        assert_eq!(FORMAT_B8G8R8A8_UNORM, 1);
    }

    /// VirtIO spec: control header is 24 bytes on the wire.
    #[test]
    fn test_ctrl_hdr_size() {
        assert_eq!(core::mem::size_of::<VirtioGpuCtrlHdr>(), 24);
    }

    /// Rectangle constructor stores all four fields unchanged.
    #[test]
    fn test_rect() {
        let r = VirtioGpuRect::new(10, 20, 800, 600);
        assert_eq!((r.x, r.y, r.width, r.height), (10, 20, 800, 600));
    }

    /// VirtioGpuDisplayOne: rect (16) + enabled (4) + flags (4) = 24 bytes.
    #[test]
    fn test_display_one_size() {
        assert_eq!(core::mem::size_of::<VirtioGpuDisplayOne>(), 24);
    }

    /// hdr (24) + resource_id (4) + format (4) + width (4) + height (4) = 40.
    #[test]
    fn test_resource_create_2d_size() {
        assert_eq!(core::mem::size_of::<VirtioGpuResourceCreate2d>(), 40);
    }

    /// addr (8) + length (4) + padding (4) = 16 bytes.
    #[test]
    fn test_mem_entry_size() {
        assert_eq!(core::mem::size_of::<VirtioGpuMemEntry>(), 16);
    }

    /// Device response codes map onto the expected kernel error variants.
    #[test]
    fn test_response_to_error() {
        assert!(
            matches!(
                VirtioGpuDriver::response_to_error(VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY),
                KernelError::OutOfMemory { .. }
            ),
            "Expected OutOfMemory error"
        );

        match VirtioGpuDriver::response_to_error(VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID) {
            KernelError::InvalidArgument { name, .. } => assert_eq!(name, "scanout_id"),
            _ => panic!("Expected InvalidArgument error"),
        }
    }
}